Dec 10 00:30:25 crc systemd[1]: Starting Kubernetes Kubelet...
Dec 10 00:30:26 crc restorecon[4756]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 10 00:30:26 crc restorecon[4756]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 00:30:26 crc 
restorecon[4756]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 10 00:30:26 crc 
restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc 
restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc 
restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 00:30:26 
crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 
00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
[roughly three hundred further restorecon messages of the form "Dec 10 00:30:26 crc restorecon[4756]: <path> not reset as customized by admin to system_u:object_r:container_file_t:s0:<MCS pair>" omitted; the paths cover, per pod under /var/lib/kubelet/pods/:
 87cf06ed-a83f-41a7-828d-70653580a8cb: dns config-volume configmap (Corefile), etc-hosts, dns and kube-rbac-proxy container scratch dirs
 44663579-783b-4372-86d6-acf235a62d72: dns-node-resolver container scratch dirs
 9d4552c7-cd75-42dd-8880-30dd377c49a4: console-operator config and trusted-ca configmaps, etc-hosts, console-operator container scratch dirs
 1bf7eb37-55a3-4c65-b768-a94c82151e69: openshift-apiserver etcd-serving-ca, config, audit, image-import-ca and trusted-ca-bundle configmaps, etc-hosts, fix-audit-permissions, openshift-apiserver and openshift-apiserver-check-endpoints container scratch dirs
 308be0ea-9f5f-4b29-aeb1-5abd31a0b17b: packageserver k8s-webhook-server/serving-certs tmpfs, etc-hosts, packageserver container scratch dirs
 0b78653f-4ff9-4508-8672-245ed9b561e3: cluster-version-operator service-ca configmap, etc-hosts, cluster-version-operator container scratch dirs
 8f668bae-612b-4b75-9490-919e737c6a3b: image-registry trusted-ca and registry-certificates configmaps, plus the ca-trust-extracted empty-dir (edk2/cacerts.bin, java/cacerts, openssl/ca-bundle.trust.crt, pem/tls-ca-bundle.pem, pem/email-ca-bundle.pem, pem/objsign-ca-bundle.pem, and pem/directory-hash hash symlinks and PEM files for the bundled public root CAs, e.g. ACCVRAIZ1, AC_RAIZ_FNMT-RCM, Actalis, AffirmTrust, Amazon Root CA 1-4, ANF_Secure_Server_Root_CA, Atos TrustedRoot, Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068, Baltimore_CyberTrust_Root, BJCA_Global_Root_CA1/CA2, Buypass Class 2/3, CA_Disig_Root_R2, Certainly_Root_R1/E1, Certigna, Certum, CFCA_EV_ROOT, COMODO, CommScope_Public_Trust, D-TRUST, DigiCert, GlobalSign, GTS_Root_R3/R4, AAA_Certificate_Services, along with the cluster's openshift-service-serving-signer_1740288168.pem and ingress-operator_1740288202.pem)]
Dec 10 00:30:26 crc restorecon[4756]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 10 00:30:26 crc restorecon[4756]: 
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 00:30:26 crc restorecon[4756]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 10 00:30:26 crc restorecon[4756]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Dec 10 00:30:27 crc kubenswrapper[4884]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 10 00:30:27 crc kubenswrapper[4884]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Dec 10 00:30:27 crc kubenswrapper[4884]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 10 00:30:27 crc kubenswrapper[4884]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 10 00:30:27 crc kubenswrapper[4884]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Dec 10 00:30:27 crc kubenswrapper[4884]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.037074 4884 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046030 4884 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046079 4884 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046088 4884 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046100 4884 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046109 4884 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046118 4884 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046129 4884 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046139 4884 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046148 4884 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046156 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046164 4884 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046173 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046181 4884 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046188 4884 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046197 4884 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046205 4884 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046212 4884 feature_gate.go:330] unrecognized feature gate: Example Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046220 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046227 4884 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046235 4884 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046243 4884 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 10 
00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046251 4884 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046258 4884 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046266 4884 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046275 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046283 4884 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046290 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046298 4884 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046306 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046314 4884 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046321 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046329 4884 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046337 4884 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046354 4884 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046363 4884 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046370 4884 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046378 4884 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046386 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046393 4884 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046408 4884 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046420 4884 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046456 4884 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
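[annotation] The long run of feature_gate.go:330 warnings is expected noise on this platform: names such as GatewayAPI, NewOLM, and InsightsConfig appear to be OpenShift cluster-level feature gates passed through to the kubelet, which recognizes only upstream Kubernetes gates and ignores the rest. Only recognized gates survive into the feature_gate.go:386 summary printed later. A quick way to pull that effective map out of a capture like this (kubelet.log is a stand-in for wherever the journal was saved):

    # Extract the first effective-gate summary the kubelet printed.
    grep -o 'feature gates: {map\[[^]]*\]}' kubelet.log | head -n1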
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046467 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046475 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046484 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046493 4884 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046501 4884 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046509 4884 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046517 4884 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046525 4884 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046534 4884 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046542 4884 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046549 4884 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046557 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046566 4884 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046574 4884 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046583 4884 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046591 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046599 4884 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046610 4884 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046619 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046629 4884 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046638 4884 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046646 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046656 4884 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046664 4884 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046675 4884 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. 
It will be removed in a future release. Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046684 4884 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046692 4884 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046699 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.046707 4884 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.046865 4884 flags.go:64] FLAG: --address="0.0.0.0" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.046884 4884 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.046902 4884 flags.go:64] FLAG: --anonymous-auth="true" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.046914 4884 flags.go:64] FLAG: --application-metrics-count-limit="100" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.046927 4884 flags.go:64] FLAG: --authentication-token-webhook="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.046937 4884 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.046949 4884 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.046961 4884 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.046971 4884 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.046980 4884 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.046991 4884 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047001 4884 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047011 4884 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047020 4884 flags.go:64] FLAG: --cgroup-root="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047029 4884 flags.go:64] FLAG: --cgroups-per-qos="true" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047038 4884 flags.go:64] FLAG: --client-ca-file="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047047 4884 flags.go:64] FLAG: --cloud-config="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047056 4884 flags.go:64] FLAG: --cloud-provider="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047065 4884 flags.go:64] FLAG: --cluster-dns="[]" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047075 4884 flags.go:64] FLAG: --cluster-domain="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047084 4884 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047094 4884 flags.go:64] FLAG: --config-dir="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047104 4884 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047114 4884 flags.go:64] FLAG: --container-log-max-files="5" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047126 4884 flags.go:64] FLAG: 
--container-log-max-size="10Mi" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047135 4884 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047146 4884 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047156 4884 flags.go:64] FLAG: --containerd-namespace="k8s.io" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047167 4884 flags.go:64] FLAG: --contention-profiling="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047176 4884 flags.go:64] FLAG: --cpu-cfs-quota="true" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047187 4884 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047197 4884 flags.go:64] FLAG: --cpu-manager-policy="none" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047207 4884 flags.go:64] FLAG: --cpu-manager-policy-options="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047218 4884 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047227 4884 flags.go:64] FLAG: --enable-controller-attach-detach="true" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047236 4884 flags.go:64] FLAG: --enable-debugging-handlers="true" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047245 4884 flags.go:64] FLAG: --enable-load-reader="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047257 4884 flags.go:64] FLAG: --enable-server="true" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047267 4884 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047283 4884 flags.go:64] FLAG: --event-burst="100" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047293 4884 flags.go:64] FLAG: --event-qps="50" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047302 4884 flags.go:64] FLAG: --event-storage-age-limit="default=0" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047311 4884 flags.go:64] FLAG: --event-storage-event-limit="default=0" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047320 4884 flags.go:64] FLAG: --eviction-hard="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047342 4884 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047351 4884 flags.go:64] FLAG: --eviction-minimum-reclaim="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047360 4884 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047370 4884 flags.go:64] FLAG: --eviction-soft="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047379 4884 flags.go:64] FLAG: --eviction-soft-grace-period="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047388 4884 flags.go:64] FLAG: --exit-on-lock-contention="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047397 4884 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047406 4884 flags.go:64] FLAG: --experimental-mounter-path="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047415 4884 flags.go:64] FLAG: --fail-cgroupv1="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047424 4884 flags.go:64] FLAG: --fail-swap-on="true" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 
00:30:27.047467 4884 flags.go:64] FLAG: --feature-gates="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047480 4884 flags.go:64] FLAG: --file-check-frequency="20s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047489 4884 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047498 4884 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047508 4884 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047518 4884 flags.go:64] FLAG: --healthz-port="10248" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047527 4884 flags.go:64] FLAG: --help="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047536 4884 flags.go:64] FLAG: --hostname-override="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047545 4884 flags.go:64] FLAG: --housekeeping-interval="10s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047554 4884 flags.go:64] FLAG: --http-check-frequency="20s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047565 4884 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047573 4884 flags.go:64] FLAG: --image-credential-provider-config="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047582 4884 flags.go:64] FLAG: --image-gc-high-threshold="85" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047591 4884 flags.go:64] FLAG: --image-gc-low-threshold="80" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047600 4884 flags.go:64] FLAG: --image-service-endpoint="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047609 4884 flags.go:64] FLAG: --kernel-memcg-notification="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047618 4884 flags.go:64] FLAG: --kube-api-burst="100" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047628 4884 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047637 4884 flags.go:64] FLAG: --kube-api-qps="50" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047647 4884 flags.go:64] FLAG: --kube-reserved="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047656 4884 flags.go:64] FLAG: --kube-reserved-cgroup="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047664 4884 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047674 4884 flags.go:64] FLAG: --kubelet-cgroups="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047683 4884 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047692 4884 flags.go:64] FLAG: --lock-file="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047701 4884 flags.go:64] FLAG: --log-cadvisor-usage="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047711 4884 flags.go:64] FLAG: --log-flush-frequency="5s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047721 4884 flags.go:64] FLAG: --log-json-info-buffer-size="0" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047734 4884 flags.go:64] FLAG: --log-json-split-stream="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047743 4884 flags.go:64] FLAG: --log-text-info-buffer-size="0" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047753 4884 flags.go:64] FLAG: 
--log-text-split-stream="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047762 4884 flags.go:64] FLAG: --logging-format="text" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047771 4884 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047781 4884 flags.go:64] FLAG: --make-iptables-util-chains="true" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047790 4884 flags.go:64] FLAG: --manifest-url="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047801 4884 flags.go:64] FLAG: --manifest-url-header="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047822 4884 flags.go:64] FLAG: --max-housekeeping-interval="15s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047832 4884 flags.go:64] FLAG: --max-open-files="1000000" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047843 4884 flags.go:64] FLAG: --max-pods="110" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047852 4884 flags.go:64] FLAG: --maximum-dead-containers="-1" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047862 4884 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047871 4884 flags.go:64] FLAG: --memory-manager-policy="None" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047883 4884 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047892 4884 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047902 4884 flags.go:64] FLAG: --node-ip="192.168.126.11" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047912 4884 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047935 4884 flags.go:64] FLAG: --node-status-max-images="50" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047944 4884 flags.go:64] FLAG: --node-status-update-frequency="10s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047954 4884 flags.go:64] FLAG: --oom-score-adj="-999" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047963 4884 flags.go:64] FLAG: --pod-cidr="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047972 4884 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047986 4884 flags.go:64] FLAG: --pod-manifest-path="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.047995 4884 flags.go:64] FLAG: --pod-max-pids="-1" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048005 4884 flags.go:64] FLAG: --pods-per-core="0" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048014 4884 flags.go:64] FLAG: --port="10250" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048023 4884 flags.go:64] FLAG: --protect-kernel-defaults="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048032 4884 flags.go:64] FLAG: --provider-id="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048042 4884 flags.go:64] FLAG: --qos-reserved="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048051 4884 flags.go:64] FLAG: --read-only-port="10255" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048060 4884 flags.go:64] FLAG: 
--register-node="true" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048070 4884 flags.go:64] FLAG: --register-schedulable="true" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048078 4884 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048095 4884 flags.go:64] FLAG: --registry-burst="10" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048104 4884 flags.go:64] FLAG: --registry-qps="5" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048113 4884 flags.go:64] FLAG: --reserved-cpus="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048122 4884 flags.go:64] FLAG: --reserved-memory="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048134 4884 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048143 4884 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048153 4884 flags.go:64] FLAG: --rotate-certificates="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048162 4884 flags.go:64] FLAG: --rotate-server-certificates="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048171 4884 flags.go:64] FLAG: --runonce="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048180 4884 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048189 4884 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048198 4884 flags.go:64] FLAG: --seccomp-default="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048214 4884 flags.go:64] FLAG: --serialize-image-pulls="true" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048223 4884 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048233 4884 flags.go:64] FLAG: --storage-driver-db="cadvisor" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048242 4884 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048251 4884 flags.go:64] FLAG: --storage-driver-password="root" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048260 4884 flags.go:64] FLAG: --storage-driver-secure="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048269 4884 flags.go:64] FLAG: --storage-driver-table="stats" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048279 4884 flags.go:64] FLAG: --storage-driver-user="root" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048288 4884 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048297 4884 flags.go:64] FLAG: --sync-frequency="1m0s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048306 4884 flags.go:64] FLAG: --system-cgroups="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048315 4884 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048330 4884 flags.go:64] FLAG: --system-reserved-cgroup="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048339 4884 flags.go:64] FLAG: --tls-cert-file="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048347 4884 flags.go:64] FLAG: --tls-cipher-suites="[]" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048358 4884 flags.go:64] 
FLAG: --tls-min-version="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048367 4884 flags.go:64] FLAG: --tls-private-key-file="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048376 4884 flags.go:64] FLAG: --topology-manager-policy="none" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048385 4884 flags.go:64] FLAG: --topology-manager-policy-options="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048394 4884 flags.go:64] FLAG: --topology-manager-scope="container" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048403 4884 flags.go:64] FLAG: --v="2" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048414 4884 flags.go:64] FLAG: --version="false" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048426 4884 flags.go:64] FLAG: --vmodule="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048462 4884 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.048473 4884 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048696 4884 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048706 4884 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048715 4884 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048725 4884 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048733 4884 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048741 4884 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048748 4884 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048759 4884 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048768 4884 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048779 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048788 4884 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048796 4884 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048804 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048812 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048820 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048827 4884 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048835 4884 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048843 4884 feature_gate.go:330] unrecognized feature 
gate: NodeDisruptionPolicy Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048851 4884 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048859 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048868 4884 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048875 4884 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048883 4884 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048890 4884 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048899 4884 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048906 4884 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048915 4884 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048922 4884 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048930 4884 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048938 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048946 4884 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048954 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048962 4884 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048969 4884 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048978 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048986 4884 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.048995 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049003 4884 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049012 4884 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049023 4884 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049034 4884 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049047 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049056 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049064 4884 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049073 4884 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049082 4884 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049090 4884 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049097 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049106 4884 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049117 4884 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049127 4884 feature_gate.go:330] unrecognized feature gate: Example Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049136 4884 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049144 4884 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049153 4884 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049163 4884 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049173 4884 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
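[annotation] The kubelet evidently parses its gate configuration more than once during startup, so the same warning list repeats in full several times in this log. Counting distinct messages makes the set easier to audit; a sketch, again assuming the journal was saved as kubelet.log:

    # One line per distinct unrecognized gate, with its repeat count.
    grep -o 'unrecognized feature gate: [A-Za-z0-9]*' kubelet.log | sort | uniq -c | sort -rn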
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049183 4884 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049192 4884 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049200 4884 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049209 4884 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049218 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049227 4884 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049235 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049244 4884 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049252 4884 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049259 4884 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049267 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049275 4884 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049283 4884 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049291 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.049298 4884 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.049352 4884 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.059021 4884 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.059076 4884 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059175 4884 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059187 4884 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059193 4884 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059199 4884 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059204 4884 feature_gate.go:330] unrecognized 
feature gate: MultiArchInstallGCP Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059208 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059211 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059215 4884 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059219 4884 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059223 4884 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059228 4884 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059232 4884 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059237 4884 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059241 4884 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059246 4884 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059250 4884 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059255 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059259 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059263 4884 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059267 4884 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059272 4884 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059276 4884 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059281 4884 feature_gate.go:330] unrecognized feature gate: Example Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059285 4884 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059290 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059295 4884 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059299 4884 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059305 4884 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059314 4884 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059319 4884 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059324 4884 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059330 4884 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059334 4884 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059339 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059346 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059350 4884 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059354 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059359 4884 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059363 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059368 4884 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059372 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059376 4884 feature_gate.go:330] unrecognized feature gate: PinnedImages Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059380 4884 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059384 4884 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059390 4884 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059397 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059402 4884 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059408 4884 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
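[annotation] Alongside the unrecognized names, the feature_gate.go:351/353 lines flag gates that are pinned explicitly even though they are already GA (ValidatingAdmissionPolicy, CloudDualStackNodeIPs, DisableKubeletCloudCredentialProviders) or deprecated (KMSv1); those explicit settings will eventually need to be dropped once the gates are removed upstream. To see which gates a given kubelet binary still recognizes, its --help text enumerates them under --feature-gates; a sketch that relies on the "Name=true|false" formatting of that help output:

    # List the gate names this kubelet build knows about.
    kubelet --help 2>&1 | grep -oE '[A-Za-z0-9]+=true\|false' | sort -u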
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059414 4884 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059419 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059424 4884 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059460 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059465 4884 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059469 4884 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059472 4884 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059476 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059480 4884 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059483 4884 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059487 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059493 4884 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059499 4884 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059505 4884 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059511 4884 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059524 4884 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059530 4884 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059535 4884 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059540 4884 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059545 4884 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059549 4884 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059553 4884 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059559 4884 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.059568 4884 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false 
ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059745 4884 feature_gate.go:330] unrecognized feature gate: Example Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059755 4884 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059761 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059767 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059773 4884 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059778 4884 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059784 4884 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059790 4884 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059796 4884 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059802 4884 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059807 4884 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059813 4884 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059818 4884 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059824 4884 feature_gate.go:330] unrecognized feature gate: PlatformOperators Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059829 4884 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059834 4884 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059839 4884 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059845 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059850 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059856 4884 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059861 4884 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059866 4884 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059871 4884 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059877 4884 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Dec 10 
00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059883 4884 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059888 4884 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059893 4884 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059898 4884 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059903 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059908 4884 feature_gate.go:330] unrecognized feature gate: NewOLM Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059913 4884 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059919 4884 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059924 4884 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059930 4884 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059938 4884 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059944 4884 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059949 4884 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059955 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059960 4884 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059966 4884 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059971 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059975 4884 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059980 4884 feature_gate.go:330] unrecognized feature gate: SignatureStores Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059984 4884 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059988 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059993 4884 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.059998 4884 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060004 4884 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060010 4884 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060014 4884 feature_gate.go:330] unrecognized feature gate: GatewayAPI Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060020 4884 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060024 4884 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060031 4884 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060037 4884 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060042 4884 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060049 4884 feature_gate.go:330] unrecognized feature gate: OVNObservability Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060056 4884 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060061 4884 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060066 4884 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060071 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060076 4884 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060082 4884 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060089 4884 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060094 4884 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060099 4884 feature_gate.go:330] unrecognized feature gate: InsightsConfig Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060104 4884 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060111 4884 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060117 4884 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060122 4884 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060127 4884 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.060133 4884 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.060142 4884 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.060949 4884 server.go:940] "Client rotation is on, will bootstrap in background"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.063920 4884 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.064021 4884 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.064713 4884 server.go:997] "Starting client certificate rotation"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.064738 4884 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.064950 4884 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-01 22:00:00.83843079 +0000 UTC
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.065099 4884 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.070995 4884 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.073510 4884 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.073562 4884 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.089367 4884 log.go:25] "Validated CRI v1 runtime API"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.111239 4884 log.go:25] "Validated CRI v1 image API"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.113963 4884 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.118209 4884 
fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-12-10-00-26-07-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.118266 4884 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.145167 4884 manager.go:217] Machine: {Timestamp:2025-12-10 00:30:27.143048233 +0000 UTC m=+0.221005420 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:bb0ea269-6375-48fa-bc24-bfb5f5739a28 BootID:9e5a05e6-2f2c-47fe-b87f-7c17a55bede5 Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:1f:f3:e5 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:1f:f3:e5 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:ff:61:b8 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:6d:17:15 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:f7:ce:72 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:35:63:9e Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:f2:83:be Speed:-1 Mtu:1496} {Name:eth10 MacAddress:22:10:d8:8e:de:6c Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:f6:e3:54:6d:02:fd Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data 
Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.145700 4884 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. 
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.145940 4884 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.146945 4884 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.147286 4884 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.147350 4884 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.147780 4884 topology_manager.go:138] "Creating topology manager with none policy"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.147800 4884 container_manager_linux.go:303] "Creating device plugin manager"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.148078 4884 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.148147 4884 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.148426 4884 state_mem.go:36] "Initialized new in-memory state store"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.148593 4884 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.149522 4884 kubelet.go:418] "Attempting to sync node with API server"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.149559 4884 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.149602 4884 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.149624 4884 kubelet.go:324] "Adding apiserver pod source"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.149667 4884 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.152550 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.152593 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.152676 4884 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.152711 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError"
Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.152930 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.153862 4884 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.155040 4884 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.155870 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.155906 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.155919 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.155932 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.155951 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.155969 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.155981 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.156009 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.156023 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.156035 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.156079 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.156091 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.156648 4884 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.157303 4884 server.go:1280] "Started kubelet"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.157941 4884 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.157942 4884 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.158773 4884 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.159885 4884 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.159911 4884 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.159989 4884 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.160042 4884 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-03 08:54:07.465873322 +0000 UTC
Dec 10 00:30:27 crc systemd[1]: Started Kubernetes Kubelet.
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.166859 4884 volume_manager.go:287] "The desired_state_of_world populator starts"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.166996 4884 volume_manager.go:289] "Starting Kubelet Volume Manager"
Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.167233 4884 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.170171 4884 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.171142 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="200ms"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.172186 4884 server.go:460] "Adding debug handlers to kubelet server"
Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.172164 4884 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.58:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187fb336dc3f296d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-10 00:30:27.157256557 +0000 UTC m=+0.235213684,LastTimestamp:2025-12-10 00:30:27.157256557 +0000 UTC m=+0.235213684,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.177185 4884 factory.go:55] Registering systemd factory
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.177227 4884 factory.go:221] Registration of the systemd container factory successfully
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.177708 4884 factory.go:153] Registering CRI-O factory
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.177722 4884 factory.go:221] Registration of the crio container factory successfully
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.177864 4884 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.177898 4884 factory.go:103] Registering Raw factory
Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.177916 4884 manager.go:1196] Started watching for new ooms in manager
Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.177953 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused
Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.178202 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get
\"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.178796 4884 manager.go:319] Starting recovery of all containers Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.181954 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182008 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182019 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182033 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182043 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182053 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182065 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182079 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182091 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182102 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182113 4884 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182123 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182138 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182151 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182163 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182172 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182182 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182193 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182212 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182222 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182232 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182243 4884 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182254 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182266 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182277 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182287 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182302 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182313 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182324 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182334 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182345 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182389 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182399 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182410 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182420 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182444 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182455 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182465 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182477 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182489 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182501 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182512 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182523 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182534 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182546 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182559 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182575 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182590 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182601 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182612 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182624 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182637 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182656 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182668 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182681 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182699 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182711 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182724 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182739 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182752 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182765 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182777 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182788 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182800 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182811 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182822 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182834 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182847 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182858 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182870 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182884 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182899 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182912 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182924 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182935 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182946 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182959 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" 
volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182972 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182984 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.182995 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183007 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183019 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183034 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183046 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183057 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183069 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183082 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183097 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183108 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183119 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183131 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183144 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183156 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183172 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183185 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183197 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183207 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183220 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183232 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" 
volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183246 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183259 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183270 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183281 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183296 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183314 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183326 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183339 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183352 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183366 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183406 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" 
volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183418 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183445 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183459 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183472 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183485 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183498 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183510 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183522 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183543 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183558 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183571 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183584 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183595 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183606 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183617 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183630 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183641 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183653 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183664 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183676 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183687 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183700 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183710 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183720 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183732 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183744 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183757 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183768 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183779 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183791 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183804 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183816 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183829 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" 
volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183841 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183859 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183871 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183882 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183894 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183905 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183916 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183926 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183940 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183950 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183962 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183973 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.183985 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184001 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184013 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184023 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184035 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184046 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184059 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184070 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184083 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184094 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" 
volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184106 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184117 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184133 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184144 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184156 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.184169 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185619 4884 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185646 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185697 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185712 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185727 4884 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185743 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185771 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185783 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185795 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185807 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185818 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185851 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185869 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185883 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185894 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185907 4884 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185918 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185930 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185941 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185956 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185969 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.185980 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186041 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186062 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186076 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186090 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186115 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186129 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186144 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186156 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186170 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186183 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186195 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186209 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186220 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186237 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186249 4884 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186260 4884 reconstruct.go:97] "Volume reconstruction finished" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.186267 4884 reconciler.go:26] 
"Reconciler: start to sync state" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.203120 4884 manager.go:324] Recovery completed Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.217149 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.218925 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.218974 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.218984 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.223664 4884 cpu_manager.go:225] "Starting CPU manager" policy="none" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.223764 4884 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.223811 4884 state_mem.go:36] "Initialized new in-memory state store" Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.269180 4884 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.280983 4884 policy_none.go:49] "None policy: Start" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.281396 4884 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.282707 4884 memory_manager.go:170] "Starting memorymanager" policy="None" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.282762 4884 state_mem.go:35] "Initializing new in-memory state store" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.285176 4884 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.285392 4884 status_manager.go:217] "Starting to sync pod status with apiserver" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.285648 4884 kubelet.go:2335] "Starting kubelet main sync loop" Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.285949 4884 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.286639 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.286757 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.352285 4884 manager.go:334] "Starting Device Plugin manager" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.352367 4884 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.352389 4884 server.go:79] "Starting device plugin registration server" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.353031 4884 eviction_manager.go:189] "Eviction manager: starting control loop" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.353064 4884 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.353520 4884 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.353659 4884 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.353673 4884 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.363644 4884 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.372799 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="400ms" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.387189 4884 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.387333 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.390600 4884 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.390665 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.390690 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.390992 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.392172 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.392267 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.392841 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.392920 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.392943 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.393192 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.393380 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.393469 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.394205 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.394261 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.394281 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.395295 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.395355 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.395368 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.395795 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.395840 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.395854 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.396034 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.396248 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.396352 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.397286 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.397346 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.397365 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.397646 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.397932 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.397988 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.398011 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.398034 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.398055 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.399732 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.399772 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.399791 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.399826 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.399840 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.399854 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.400254 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.400317 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.401420 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.401484 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.401501 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.455140 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.457115 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.457162 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.457173 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.457203 4884 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.458105 4884 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.58:6443: connect: connection refused" node="crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490002 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490044 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490067 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490085 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490105 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490123 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490141 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490156 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490172 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490188 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490202 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490216 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490231 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490244 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: 
\"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.490257 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.590974 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591042 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591088 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591129 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591168 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591202 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591235 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591209 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591296 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591268 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591342 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591341 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591408 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591313 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591525 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591528 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591630 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591676 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 
00:30:27.591759 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591539 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591813 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591877 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591918 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.591940 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.592019 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.592112 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.592149 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.592230 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.592267 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.592415 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.658281 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.659656 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.659697 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.659710 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.659735 4884 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.660263 4884 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.58:6443: connect: connection refused" node="crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.731278 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.741404 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.763518 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.764658 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-4052ea71986db57f3df9c4f4d577fa645b131dbd3d17e646f377f8c64c4eb5e7 WatchSource:0}: Error finding container 4052ea71986db57f3df9c4f4d577fa645b131dbd3d17e646f377f8c64c4eb5e7: Status 404 returned error can't find the container with id 4052ea71986db57f3df9c4f4d577fa645b131dbd3d17e646f377f8c64c4eb5e7 Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.772924 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: E1210 00:30:27.774718 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="800ms" Dec 10 00:30:27 crc kubenswrapper[4884]: I1210 00:30:27.779892 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.780758 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-3acc96d1aca0db1588c7bffb8726121ea018f3205e666a50571fc6c88a2a3e75 WatchSource:0}: Error finding container 3acc96d1aca0db1588c7bffb8726121ea018f3205e666a50571fc6c88a2a3e75: Status 404 returned error can't find the container with id 3acc96d1aca0db1588c7bffb8726121ea018f3205e666a50571fc6c88a2a3e75 Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.789501 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-a43cfb187b2adcce855f16d9cf7ecde0ebc8da855340fdf02f3620e24fd83c8b WatchSource:0}: Error finding container a43cfb187b2adcce855f16d9cf7ecde0ebc8da855340fdf02f3620e24fd83c8b: Status 404 returned error can't find the container with id a43cfb187b2adcce855f16d9cf7ecde0ebc8da855340fdf02f3620e24fd83c8b Dec 10 00:30:27 crc kubenswrapper[4884]: W1210 00:30:27.803612 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-c263e5450064cb63cdd1aa58ce454bd35d302283a802654fbae59dd97be5cc7e WatchSource:0}: Error finding container c263e5450064cb63cdd1aa58ce454bd35d302283a802654fbae59dd97be5cc7e: Status 404 returned error can't find the container with id c263e5450064cb63cdd1aa58ce454bd35d302283a802654fbae59dd97be5cc7e Dec 10 00:30:28 crc kubenswrapper[4884]: W1210 00:30:28.016364 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Dec 10 00:30:28 crc kubenswrapper[4884]: E1210 00:30:28.016489 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.061351 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.064026 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.064078 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.064097 4884 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.064137 4884 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 00:30:28 crc kubenswrapper[4884]: E1210 00:30:28.064729 4884 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.58:6443: connect: connection refused" node="crc" Dec 10 00:30:28 crc kubenswrapper[4884]: W1210 00:30:28.095961 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Dec 10 00:30:28 crc kubenswrapper[4884]: E1210 00:30:28.096036 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Dec 10 00:30:28 crc kubenswrapper[4884]: W1210 00:30:28.145575 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Dec 10 00:30:28 crc kubenswrapper[4884]: E1210 00:30:28.146064 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.159884 4884 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.160887 4884 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 22:13:06.209164814 +0000 UTC Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.160934 4884 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 237h42m38.048233291s for next certificate rotation Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.293315 4884 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8" exitCode=0 Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.293408 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8"} Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.293536 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"3acc96d1aca0db1588c7bffb8726121ea018f3205e666a50571fc6c88a2a3e75"} Dec 10 00:30:28 crc 
kubenswrapper[4884]: I1210 00:30:28.293688 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.295175 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.295226 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.295241 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.296046 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e"} Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.296092 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3c8c01d56e1988a90b023b3b4f4d5aa136c98787f9c05f44027b85ba865a3505"} Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.298754 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f" exitCode=0 Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.298828 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f"} Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.298860 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4052ea71986db57f3df9c4f4d577fa645b131dbd3d17e646f377f8c64c4eb5e7"} Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.299014 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.299900 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.299934 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.299947 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.300974 4884 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="90220bd853a970fedca4fce5e6394366f4c8ccfdf9397de2c0e8e7959491c067" exitCode=0 Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.301053 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"90220bd853a970fedca4fce5e6394366f4c8ccfdf9397de2c0e8e7959491c067"} Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.301087 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c263e5450064cb63cdd1aa58ce454bd35d302283a802654fbae59dd97be5cc7e"} Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.301216 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.303246 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.303621 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.303650 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.303664 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.304415 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.304456 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.304469 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.305071 4884 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="66f957e72695eba257f69c29126275be8c12e89f821841ad5f673a635feda617" exitCode=0 Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.305116 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"66f957e72695eba257f69c29126275be8c12e89f821841ad5f673a635feda617"} Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.305138 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"a43cfb187b2adcce855f16d9cf7ecde0ebc8da855340fdf02f3620e24fd83c8b"} Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.305219 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.306514 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.306543 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.306565 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:28 crc kubenswrapper[4884]: E1210 00:30:28.575539 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="1.6s" Dec 10 00:30:28 crc kubenswrapper[4884]: W1210 00:30:28.594825 4884 reflector.go:561] 
k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.58:6443: connect: connection refused Dec 10 00:30:28 crc kubenswrapper[4884]: E1210 00:30:28.594917 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.58:6443: connect: connection refused" logger="UnhandledError" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.864992 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.867110 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.867158 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.867170 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:28 crc kubenswrapper[4884]: I1210 00:30:28.867206 4884 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 00:30:28 crc kubenswrapper[4884]: E1210 00:30:28.867836 4884 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.58:6443: connect: connection refused" node="crc" Dec 10 00:30:28 crc kubenswrapper[4884]: E1210 00:30:28.883340 4884 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.58:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187fb336dc3f296d default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-10 00:30:27.157256557 +0000 UTC m=+0.235213684,LastTimestamp:2025-12-10 00:30:27.157256557 +0000 UTC m=+0.235213684,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.155400 4884 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.310098 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"86f6f80c7bc3d5e5a08cd2a5433cc67788c108786fa8096cdc37d991d6003d8d"} Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.310235 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.311228 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.311262 4884 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.311271 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.315850 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9"} Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.315878 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c"} Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.315892 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c"} Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.315972 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.323060 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.323146 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.323160 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.329263 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263"} Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.329420 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0"} Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.329453 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289"} Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.329373 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.334587 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.334629 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.334642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.341006 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8"} Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.341056 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa"} Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.341075 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e"} Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.344884 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b99c92f4955eea98b72fd114d6b2cc32c28b86b650aa4933b9219a2e19597d1e"} Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.345082 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.344872 4884 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b99c92f4955eea98b72fd114d6b2cc32c28b86b650aa4933b9219a2e19597d1e" exitCode=0 Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.346339 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.346379 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:29 crc kubenswrapper[4884]: I1210 00:30:29.346392 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.086091 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.092973 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.354600 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7"} Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.354675 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4"} Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.354882 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.356359 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.356411 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.356428 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.358267 4884 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="0b9037069b841636a787d5adb68e596da9538cd303fd621a7d912273c4917600" exitCode=0 Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.358475 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.358420 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"0b9037069b841636a787d5adb68e596da9538cd303fd621a7d912273c4917600"} Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.358894 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.360160 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.360197 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.360210 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.360301 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.360330 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.360346 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.468282 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.470234 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.470304 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.470323 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.470374 4884 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.728862 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:30 crc kubenswrapper[4884]: I1210 00:30:30.733359 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.370316 4884 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"fde2ec61c3c8887402ec16b0298d7f39f65a9f0c87ef10182296b57a78557788"} Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.370419 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2d9733ccbaba989209badb1102eab9452f0d7928dd6428d5bb2202c3d82287c0"} Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.370479 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c6f32e67e4f0a06af9e79943b87ad5499507233082994748fe5e84f389304f13"} Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.370354 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.370525 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.370589 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.370636 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.372396 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.372407 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.372496 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.372465 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.372511 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:31 crc kubenswrapper[4884]: I1210 00:30:31.372530 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.348150 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.348524 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.350341 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.350395 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.350417 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.383155 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.383200 4884 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.383264 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.383134 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"484e3a97dedf40d3b11b38d46480dab8cfa71bb15b2bd9f624efef19b39a4d27"} Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.383410 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ba0e14d544507c375e4ac580dd862428d2bc949396923b37ffd5b542ffa8a064"} Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.385017 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.385091 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.385115 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.385189 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.385251 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:32 crc kubenswrapper[4884]: I1210 00:30:32.385271 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:33 crc kubenswrapper[4884]: I1210 00:30:33.030249 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Dec 10 00:30:33 crc kubenswrapper[4884]: I1210 00:30:33.386275 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:33 crc kubenswrapper[4884]: I1210 00:30:33.389205 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:33 crc kubenswrapper[4884]: I1210 00:30:33.389274 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:33 crc kubenswrapper[4884]: I1210 00:30:33.389293 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:34 crc kubenswrapper[4884]: I1210 00:30:34.064021 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Dec 10 00:30:34 crc kubenswrapper[4884]: I1210 00:30:34.388129 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:34 crc kubenswrapper[4884]: I1210 00:30:34.389270 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:34 crc kubenswrapper[4884]: I1210 00:30:34.389697 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:34 crc kubenswrapper[4884]: I1210 00:30:34.389711 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Dec 10 00:30:35 crc kubenswrapper[4884]: I1210 00:30:35.391307 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:35 crc kubenswrapper[4884]: I1210 00:30:35.392958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:35 crc kubenswrapper[4884]: I1210 00:30:35.393015 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:35 crc kubenswrapper[4884]: I1210 00:30:35.393033 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:36 crc kubenswrapper[4884]: I1210 00:30:36.549602 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:36 crc kubenswrapper[4884]: I1210 00:30:36.550674 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 00:30:36 crc kubenswrapper[4884]: I1210 00:30:36.550890 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:36 crc kubenswrapper[4884]: I1210 00:30:36.552783 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:36 crc kubenswrapper[4884]: I1210 00:30:36.552858 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:36 crc kubenswrapper[4884]: I1210 00:30:36.552938 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:37 crc kubenswrapper[4884]: I1210 00:30:37.022639 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:37 crc kubenswrapper[4884]: I1210 00:30:37.079303 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:37 crc kubenswrapper[4884]: I1210 00:30:37.079740 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:37 crc kubenswrapper[4884]: I1210 00:30:37.081526 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:37 crc kubenswrapper[4884]: I1210 00:30:37.081575 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:37 crc kubenswrapper[4884]: I1210 00:30:37.081593 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:37 crc kubenswrapper[4884]: E1210 00:30:37.363901 4884 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 10 00:30:37 crc kubenswrapper[4884]: I1210 00:30:37.395853 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:37 crc kubenswrapper[4884]: I1210 00:30:37.397765 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:37 crc kubenswrapper[4884]: I1210 00:30:37.397828 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:37 crc kubenswrapper[4884]: I1210 
00:30:37.397849 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.115406 4884 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.115478 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Dec 10 00:30:39 crc kubenswrapper[4884]: E1210 00:30:39.157229 4884 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": net/http: TLS handshake timeout" logger="UnhandledError" Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.160840 4884 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.182523 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.182954 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.184971 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.185120 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.185141 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.190990 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.403717 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.404915 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.404950 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.404962 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:39 crc kubenswrapper[4884]: W1210 00:30:39.783211 4884 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get 
"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Dec 10 00:30:39 crc kubenswrapper[4884]: I1210 00:30:39.783353 4884 trace.go:236] Trace[1812160503]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (10-Dec-2025 00:30:29.781) (total time: 10001ms): Dec 10 00:30:39 crc kubenswrapper[4884]: Trace[1812160503]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (00:30:39.783) Dec 10 00:30:39 crc kubenswrapper[4884]: Trace[1812160503]: [10.001712836s] [10.001712836s] END Dec 10 00:30:39 crc kubenswrapper[4884]: E1210 00:30:39.783392 4884 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Dec 10 00:30:40 crc kubenswrapper[4884]: I1210 00:30:40.086988 4884 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\": RBAC: [clusterrole.rbac.authorization.k8s.io \"system:public-info-viewer\" not found, clusterrole.rbac.authorization.k8s.io \"system:openshift:public-info-viewer\" not found]","reason":"Forbidden","details":{},"code":403} Dec 10 00:30:40 crc kubenswrapper[4884]: I1210 00:30:40.087059 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Dec 10 00:30:40 crc kubenswrapper[4884]: I1210 00:30:40.097850 4884 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\": RBAC: [clusterrole.rbac.authorization.k8s.io \"system:openshift:public-info-viewer\" not found, clusterrole.rbac.authorization.k8s.io \"system:public-info-viewer\" not found]","reason":"Forbidden","details":{},"code":403} Dec 10 00:30:40 crc kubenswrapper[4884]: I1210 00:30:40.097949 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Dec 10 00:30:40 crc kubenswrapper[4884]: I1210 00:30:40.740956 4884 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]log ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]etcd ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/start-apiserver-admission-initializer ok Dec 10 00:30:40 crc kubenswrapper[4884]: 
[+]poststarthook/quota.openshift.io-clusterquotamapping ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/openshift.io-api-request-count-filter ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/openshift.io-startkubeinformers ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/generic-apiserver-start-informers ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/priority-and-fairness-config-consumer ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/priority-and-fairness-filter ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/storage-object-count-tracker-hook ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/start-apiextensions-informers ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/start-apiextensions-controllers ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/crd-informer-synced ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/start-system-namespaces-controller ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/start-cluster-authentication-info-controller ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/start-legacy-token-tracking-controller ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/start-service-ip-repair-controllers ok Dec 10 00:30:40 crc kubenswrapper[4884]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Dec 10 00:30:40 crc kubenswrapper[4884]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/priority-and-fairness-config-producer ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/bootstrap-controller ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/start-kube-aggregator-informers ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/apiservice-status-local-available-controller ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/apiservice-status-remote-available-controller ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/apiservice-registration-controller ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/apiservice-wait-for-first-sync ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/apiservice-discovery-controller ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/kube-apiserver-autoregistration ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]autoregister-completion ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/apiservice-openapi-controller ok Dec 10 00:30:40 crc kubenswrapper[4884]: [+]poststarthook/apiservice-openapiv3-controller ok Dec 10 00:30:40 crc kubenswrapper[4884]: livez check failed Dec 10 00:30:40 crc kubenswrapper[4884]: I1210 00:30:40.741068 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 00:30:42 crc 
kubenswrapper[4884]: I1210 00:30:42.182975 4884 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 00:30:42 crc kubenswrapper[4884]: I1210 00:30:42.183086 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 10 00:30:43 crc kubenswrapper[4884]: I1210 00:30:43.262674 4884 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Dec 10 00:30:43 crc kubenswrapper[4884]: I1210 00:30:43.284728 4884 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Dec 10 00:30:44 crc kubenswrapper[4884]: I1210 00:30:44.101259 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Dec 10 00:30:44 crc kubenswrapper[4884]: I1210 00:30:44.101544 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:44 crc kubenswrapper[4884]: I1210 00:30:44.103039 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:44 crc kubenswrapper[4884]: I1210 00:30:44.103141 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:44 crc kubenswrapper[4884]: I1210 00:30:44.103169 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:44 crc kubenswrapper[4884]: I1210 00:30:44.121544 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Dec 10 00:30:44 crc kubenswrapper[4884]: I1210 00:30:44.417463 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:44 crc kubenswrapper[4884]: I1210 00:30:44.418627 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:44 crc kubenswrapper[4884]: I1210 00:30:44.418708 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:44 crc kubenswrapper[4884]: I1210 00:30:44.418729 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:44 crc kubenswrapper[4884]: I1210 00:30:44.963295 4884 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.085858 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="3.2s" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.090760 4884 trace.go:236] Trace[353242433]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 
(10-Dec-2025 00:30:30.834) (total time: 14256ms): Dec 10 00:30:45 crc kubenswrapper[4884]: Trace[353242433]: ---"Objects listed" error: 14256ms (00:30:45.090) Dec 10 00:30:45 crc kubenswrapper[4884]: Trace[353242433]: [14.256202521s] [14.256202521s] END Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.091652 4884 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.093306 4884 trace.go:236] Trace[797609793]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (10-Dec-2025 00:30:30.981) (total time: 14111ms): Dec 10 00:30:45 crc kubenswrapper[4884]: Trace[797609793]: ---"Objects listed" error: 14111ms (00:30:45.093) Dec 10 00:30:45 crc kubenswrapper[4884]: Trace[797609793]: [14.111684678s] [14.111684678s] END Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.093358 4884 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.093877 4884 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.095193 4884 trace.go:236] Trace[1583083854]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (10-Dec-2025 00:30:30.554) (total time: 14540ms): Dec 10 00:30:45 crc kubenswrapper[4884]: Trace[1583083854]: ---"Objects listed" error: 14540ms (00:30:45.094) Dec 10 00:30:45 crc kubenswrapper[4884]: Trace[1583083854]: [14.540947865s] [14.540947865s] END Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.096005 4884 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.099749 4884 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.161395 4884 apiserver.go:52] "Watching apiserver" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.165634 4884 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.166027 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"] Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.166666 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.166904 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.167005 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.166897 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.167358 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.167717 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.167772 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.169001 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.167040 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.170259 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.172311 4884 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.174950 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.176057 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.176682 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.177059 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.177187 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.177608 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.177617 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.177741 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.194875 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.194926 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.194956 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.194981 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195006 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195036 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195061 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195085 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195126 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195148 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195170 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195194 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195218 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195244 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195269 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195296 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195323 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195354 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195379 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195408 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195460 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195487 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195509 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195531 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195553 
4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195577 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195624 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195650 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195674 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195699 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195722 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195745 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195768 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195791 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 
00:30:45.195818 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195844 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195868 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195891 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195912 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195965 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.195987 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196012 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196034 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196058 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 10 00:30:45 crc 
kubenswrapper[4884]: I1210 00:30:45.196079 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196104 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196126 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196148 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196175 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196199 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196220 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196243 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196267 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196289 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") 
" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196315 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196337 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196360 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196381 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196403 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196424 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196478 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196507 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196531 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196555 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: 
\"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196578 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196599 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196624 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196646 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196679 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196704 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196726 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196748 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196772 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196797 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: 
\"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196823 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196846 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196871 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196899 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196929 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196957 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.196985 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197014 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197134 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197160 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197183 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197206 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197229 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197252 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197275 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197297 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197320 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197345 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197370 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197394 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197417 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197461 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197485 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197508 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197530 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197552 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197577 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197601 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197624 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197649 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197672 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197697 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197720 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197742 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197765 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197792 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197819 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197843 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197867 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 10 00:30:45 
crc kubenswrapper[4884]: I1210 00:30:45.197891 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197915 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197938 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197961 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.197984 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198006 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198030 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198056 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198082 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198106 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 10 
00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198132 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198157 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198183 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198210 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198234 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198259 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198285 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198309 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198334 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198359 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: 
\"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198384 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198408 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198461 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198491 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198519 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198542 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198566 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198592 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198628 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198656 4884 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198681 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198704 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198737 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198763 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198790 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198817 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198843 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198868 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198895 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198919 4884 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198942 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198968 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.198993 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199019 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199049 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199075 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199104 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199132 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199158 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199184 4884 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199209 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199234 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199259 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199293 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199321 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199351 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199378 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199424 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199469 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199495 4884 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199524 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199552 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199579 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199604 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199630 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199656 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199682 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199707 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199732 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199757 4884 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199785 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199812 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199836 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199861 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199887 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199887 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.199914 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200010 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200068 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200113 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200152 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200195 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200252 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200306 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200316 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200404 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200340 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200465 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200408 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200499 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200530 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200558 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200583 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200608 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200642 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200672 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200698 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200684 4884 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200733 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200768 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200795 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200846 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200876 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200894 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200911 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200927 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.200931 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.200982 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.201043 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:45.701020808 +0000 UTC m=+18.778977945 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.201083 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.201276 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.201332 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.201419 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.201545 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.201993 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.201749 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.205677 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.205808 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.205966 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.206105 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.206083 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.206467 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.206500 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.206720 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.206910 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.207753 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:30:45.707721046 +0000 UTC m=+18.785678173 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.208225 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.208219 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.208486 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.208511 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.208655 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.208655 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.208890 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.208912 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.209094 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.209234 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). 
InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.209371 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.209371 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.209745 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.209742 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.209808 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.210060 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.210093 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.210171 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.210655 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.210864 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.210924 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.210960 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.211475 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.211625 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:45.711601279 +0000 UTC m=+18.789558416 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.211985 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.212491 4884 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.212785 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.212813 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.212890 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.213147 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.214169 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.214372 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.214519 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.214599 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.215594 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.215267 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.215797 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.215364 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.215738 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.215754 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.215987 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.216313 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.216545 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.216668 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.217526 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.217730 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). 
InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.218461 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.218776 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.218836 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.218919 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.218969 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.219307 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.219318 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.219776 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.219983 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.220044 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.220575 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.221033 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.221084 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.221219 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.221253 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.221308 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.221520 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.221780 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.221886 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.221907 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.222330 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.222357 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.222520 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.222628 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.223014 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.223009 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.223200 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.223417 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.223767 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.224021 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.224051 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.224069 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.224504 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.224527 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.224671 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.224914 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.225084 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.225314 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.225585 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.225608 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.225725 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.225807 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.225960 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.226214 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.226250 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.226908 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.227153 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.227175 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.227503 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.227747 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.228609 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.229026 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.229406 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.229497 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.229513 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.229881 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:45.729835028 +0000 UTC m=+18.807792145 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.229895 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.229918 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.230040 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.230072 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.210655 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.230094 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.230398 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.230657 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.230766 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.231423 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.231458 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.231587 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.231596 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.231863 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.231886 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.231901 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.231917 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.231992 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:45.731971494 +0000 UTC m=+18.809928621 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.232042 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.232410 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.232491 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.232726 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.232903 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.232946 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.232994 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.233024 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.233491 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.233633 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.233675 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.233837 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.233892 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.234356 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.234399 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.234727 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.234854 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.234880 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.234969 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.235289 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.235873 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.236144 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.236326 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). 
InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.236344 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.236557 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.236608 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.237320 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.239261 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.239582 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.239729 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.241355 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.242503 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.243600 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.244119 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.244140 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.244417 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.244544 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). 
InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.246289 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.246557 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.247179 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.246323 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.249204 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.249541 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.251654 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.251782 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.251811 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.252451 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.252870 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.256860 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.259157 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.259653 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.259854 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.260369 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.261168 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.261397 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.261591 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.262155 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.262874 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.269124 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.271245 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.273189 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.275211 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.276359 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.288090 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.288684 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.288758 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.291169 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.292912 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.296205 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.296695 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.297779 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.298347 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.299053 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.300956 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.304895 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.304641 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306091 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306230 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306238 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306297 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306310 4884 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306321 4884 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306332 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306344 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306358 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306367 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306387 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306396 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306407 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306416 4884 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306426 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306446 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306457 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306467 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306477 4884 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306487 4884 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306496 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306506 4884 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306514 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306522 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306530 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306546 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306556 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306565 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306574 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306584 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306592 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306601 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306611 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306619 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306628 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306637 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306645 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306654 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306682 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306694 4884 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306705 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306714 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306723 4884 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306732 4884 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306740 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306749 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306757 4884 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306766 4884 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306775 4884 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306784 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306792 4884 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306801 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306810 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306818 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306826 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306834 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306842 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306850 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306858 4884 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306867 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306876 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306884 4884 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306892 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306902 4884 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306910 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306919 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306929 4884 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306937 4884 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306945 4884 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306953 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306962 4884 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306972 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306983 4884 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.306992 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307000 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307008 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307016 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307024 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307033 4884 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307040 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307048 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307057 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307065 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307064 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307544 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307587 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307563 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307605 4884 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307661 4884 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307687 4884 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307697 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307718 4884 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307726 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307737 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307746 4884 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307755 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307764 4884 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307773 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307782 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307790 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307799 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307809 4884 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307819 4884 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307828 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307836 4884 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.307845 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308195 4884 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308225 4884 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308235 4884 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308247 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308256 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308263 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308273 4884 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308282 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308295 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308305 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308314 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308323 4884 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308333 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308342 4884 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308352 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308361 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308373 4884 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308382 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308391 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308399 4884 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308408 4884 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308417 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308425 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308519 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308527 4884 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308538 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308550 4884 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308562 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308571 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.308580 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311266 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.309846 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311290 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311353 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311414 4884 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311447 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311458 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311467 4884 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311477 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311487 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311498 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311508 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311518 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311527 4884 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311538 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311547 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311557 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311566 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311576 4884 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311586 4884 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311670 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311681 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311709 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311719 4884 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311729 4884 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311738 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311747 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.311758 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312184 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312195 4884 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312204 4884 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312232 4884 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312250 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312260 4884 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312270 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312279 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312366 4884 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312378 4884 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312388 4884 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312397 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312409 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312478 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312487 4884 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312497 4884 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312505 4884 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312530 4884 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312529 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes"
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312554 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312565 4884 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312574 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312583 4884 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312610 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312620 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312632 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.312642 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\""
Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.314320 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.315244 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.315873 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.317095 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.320079 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.320909 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.323038 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.325649 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.328543 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.329623 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.331157 4884 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.331596 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.331797 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.333159 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.333924 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.334612 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.335785 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.336387 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.337845 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.339102 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.339398 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.340056 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.341472 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.342187 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.343131 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.343729 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.344645 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.345260 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.356911 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.370031 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.370586 4884 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.370754 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.373906 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.375212 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.375800 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.413813 4884 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.413844 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.413863 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.413874 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.422780 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.426579 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4" exitCode=255 Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.431745 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.489416 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.513562 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.519533 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.520153 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.521879 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.523182 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.525073 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.526282 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.528073 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.529312 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.530617 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.531748 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Dec 
10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.532878 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.567545 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.606393 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.606941 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.607560 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.608023 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.608566 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.609175 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.610535 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.611003 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4"} Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.716824 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.716955 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.717042 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: 
\"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.717229 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.717317 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:46.717294989 +0000 UTC m=+19.795252136 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.717415 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:30:46.717401126 +0000 UTC m=+19.795358283 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.717529 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.717575 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:46.717562258 +0000 UTC m=+19.795519415 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.740122 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.818701 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.818789 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.818988 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.819041 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.819058 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.818997 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.819165 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:46.819136203 +0000 UTC m=+19.897093330 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.819167 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.819205 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:45 crc kubenswrapper[4884]: E1210 00:30:45.819320 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:46.819290184 +0000 UTC m=+19.897247341 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:45 crc kubenswrapper[4884]: W1210 00:30:45.987912 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-910192bae1fd5660200ce2fa60419a55de45c6b58483c7046de7ca3862d69a2d WatchSource:0}: Error finding container 910192bae1fd5660200ce2fa60419a55de45c6b58483c7046de7ca3862d69a2d: Status 404 returned error can't find the container with id 910192bae1fd5660200ce2fa60419a55de45c6b58483c7046de7ca3862d69a2d Dec 10 00:30:45 crc kubenswrapper[4884]: I1210 00:30:45.995878 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:45.999882 4884 scope.go:117] "RemoveContainer" containerID="e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.010188 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.053758 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.077254 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.094882 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.107912 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.120015 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.133929 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.150535 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.169561 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.182835 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.197918 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.211621 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.227740 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver 
kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10
T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.286264 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.286419 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.430199 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd"} Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.430247 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"5db47b1eab317410dc667f7eadd9fc9e4fb61459c5db3dd83ccd817902c26cf4"} Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.432395 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.434634 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32"} Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.434924 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.436175 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"910192bae1fd5660200ce2fa60419a55de45c6b58483c7046de7ca3862d69a2d"} Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.438205 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295"} Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.438273 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121"} Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.438290 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"0748d7415b9ec640284f982c070fbb36fd8d59e4af1993d5a01d45ed5dbad792"} Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.441661 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.456921 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\
\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.481144 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.500280 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.522787 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.542076 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.561081 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.578113 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.602680 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.630062 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.651725 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.676621 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.709018 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.733332 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.749424 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.749564 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.749613 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.749674 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:30:48.749640267 +0000 UTC m=+21.827597424 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.749692 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.749747 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:48.749736164 +0000 UTC m=+21.827693291 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.749822 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.749946 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:48.749922437 +0000 UTC m=+21.827879614 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.753097 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-12-10T00:30:46Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.850963 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:46 crc kubenswrapper[4884]: I1210 00:30:46.851018 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.851146 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.851161 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.851173 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.851214 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.851253 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.851266 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.851223 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:48.85121029 +0000 UTC m=+21.929167397 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:46 crc kubenswrapper[4884]: E1210 00:30:46.851340 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:48.851319748 +0000 UTC m=+21.929276865 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:47 crc kubenswrapper[4884]: I1210 00:30:47.286444 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:47 crc kubenswrapper[4884]: I1210 00:30:47.286517 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:47 crc kubenswrapper[4884]: E1210 00:30:47.286611 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:30:47 crc kubenswrapper[4884]: E1210 00:30:47.286767 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:30:47 crc kubenswrapper[4884]: I1210 00:30:47.290958 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Dec 10 00:30:47 crc kubenswrapper[4884]: I1210 00:30:47.303083 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator
@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:47 crc kubenswrapper[4884]: I1210 00:30:47.318253 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:47 crc kubenswrapper[4884]: I1210 00:30:47.330049 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:47 crc kubenswrapper[4884]: I1210 00:30:47.350638 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:47 crc kubenswrapper[4884]: I1210 00:30:47.375390 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:47 crc kubenswrapper[4884]: I1210 00:30:47.399283 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:47 crc kubenswrapper[4884]: I1210 00:30:47.416407 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.286843 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.287104 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.300718 4884 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.303906 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.303974 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.303993 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.304114 4884 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.317711 4884 kubelet_node_status.go:115] "Node was previously registered" node="crc" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.318162 4884 kubelet_node_status.go:79] "Successfully registered node" node="crc" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.319940 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.320002 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.320021 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.320052 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.320074 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:48Z","lastTransitionTime":"2025-12-10T00:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.346288 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:48Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.352382 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.352601 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.352697 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.352795 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.352885 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:48Z","lastTransitionTime":"2025-12-10T00:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.370563 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:48Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.376626 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.376695 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.376715 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.376744 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.376768 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:48Z","lastTransitionTime":"2025-12-10T00:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.399341 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:48Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.406672 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.406731 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.406750 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.406782 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.406802 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:48Z","lastTransitionTime":"2025-12-10T00:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.427675 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:48Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.433830 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.433878 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.433896 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.433920 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.433937 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:48Z","lastTransitionTime":"2025-12-10T00:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.461456 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:48Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.461650 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.464009 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.464044 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.464053 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.464073 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.464084 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:48Z","lastTransitionTime":"2025-12-10T00:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.567789 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.567848 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.567866 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.567892 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.567912 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:48Z","lastTransitionTime":"2025-12-10T00:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.671239 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.671299 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.671319 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.671348 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.671366 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:48Z","lastTransitionTime":"2025-12-10T00:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.768844 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.768988 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.769051 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.769237 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.769239 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.769331 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:52.769306462 +0000 UTC m=+25.847263619 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.769390 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:52.769351183 +0000 UTC m=+25.847308310 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.769721 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-10 00:30:52.769678472 +0000 UTC m=+25.847635619 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.774101 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.774157 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.774180 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.774205 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.774222 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:48Z","lastTransitionTime":"2025-12-10T00:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.870263 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.870327 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.870516 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.870537 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.870551 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.870608 4884 projected.go:288] Couldn't get 
configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.870659 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.870679 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.870618 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:52.870600099 +0000 UTC m=+25.948557216 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:48 crc kubenswrapper[4884]: E1210 00:30:48.870845 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 00:30:52.870780244 +0000 UTC m=+25.948737401 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.877490 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.877562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.877584 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.877616 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:48 crc kubenswrapper[4884]: I1210 00:30:48.877637 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:48Z","lastTransitionTime":"2025-12-10T00:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:48.981566 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:48.981626 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:48.981645 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:48.981672 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:48.981691 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:48Z","lastTransitionTime":"2025-12-10T00:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.085496 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.085567 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.085586 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.085612 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.085631 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:49Z","lastTransitionTime":"2025-12-10T00:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.189052 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.189053 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.192400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.192418 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.192466 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.192482 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:49Z","lastTransitionTime":"2025-12-10T00:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.203352 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.208854 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.213092 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.234868 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.260195 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.279174 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.286396 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.286526 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:49 crc kubenswrapper[4884]: E1210 00:30:49.286563 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:30:49 crc kubenswrapper[4884]: E1210 00:30:49.286728 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.295260 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.295328 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.295348 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.295372 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.295390 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:49Z","lastTransitionTime":"2025-12-10T00:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.301506 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.321386 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.341279 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.357828 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.377049 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.393368 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.398614 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.398677 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.398690 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.398718 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.398734 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:49Z","lastTransitionTime":"2025-12-10T00:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.410398 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.433872 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.452295 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.467925 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.483347 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:49Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.501483 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.501546 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.501559 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.501582 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.501602 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:49Z","lastTransitionTime":"2025-12-10T00:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.604954 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.605011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.605022 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.605048 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.605063 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:49Z","lastTransitionTime":"2025-12-10T00:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.707287 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.707330 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.707343 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.707361 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.707371 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:49Z","lastTransitionTime":"2025-12-10T00:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.713929 4884 csr.go:261] certificate signing request csr-qzg98 is approved, waiting to be issued Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.750176 4884 csr.go:257] certificate signing request csr-qzg98 is issued Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.809501 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.809549 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.809562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.809582 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.809597 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:49Z","lastTransitionTime":"2025-12-10T00:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.911931 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.911977 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.911990 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.912006 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:49 crc kubenswrapper[4884]: I1210 00:30:49.912018 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:49Z","lastTransitionTime":"2025-12-10T00:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.014408 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.014469 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.014480 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.014497 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.014508 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:50Z","lastTransitionTime":"2025-12-10T00:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.116666 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.116707 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.116717 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.116737 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.116749 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:50Z","lastTransitionTime":"2025-12-10T00:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.191732 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-8zcgx"] Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.192089 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-pxpwg"] Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.192221 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-rcj68"] Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.192289 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.192361 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-pxpwg" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.192884 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.197920 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-bc4r6"] Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.197986 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.198019 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.199565 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.199862 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.200519 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.200705 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.200768 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.201039 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.201200 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.201226 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.201515 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.201540 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.201895 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.202750 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.215375 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.220217 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.220256 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.220267 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.220286 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.220306 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:50Z","lastTransitionTime":"2025-12-10T00:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.221118 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.225974 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.246115 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.262675 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.278883 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.281840 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-multus-conf-dir\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.281898 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-run-k8s-cni-cncf-io\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.281949 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-run-netns\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.281981 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5eaf2b70-ff64-41f3-b879-0e50bdcd06ae-mcd-auth-proxy-config\") pod \"machine-config-daemon-8zcgx\" (UID: \"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\") " pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.281999 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nqsm\" (UniqueName: \"kubernetes.io/projected/5eaf2b70-ff64-41f3-b879-0e50bdcd06ae-kube-api-access-6nqsm\") pod \"machine-config-daemon-8zcgx\" (UID: 
\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\") " pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282018 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-os-release\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282035 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-multus-cni-dir\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282054 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-multus-socket-dir-parent\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282189 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-cnibin\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282223 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-etc-kubernetes\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282246 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rchwx\" (UniqueName: \"kubernetes.io/projected/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-kube-api-access-rchwx\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282270 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-var-lib-kubelet\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282294 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-system-cni-dir\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282317 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-system-cni-dir\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282339 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282376 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-var-lib-cni-multus\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282500 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5eaf2b70-ff64-41f3-b879-0e50bdcd06ae-proxy-tls\") pod \"machine-config-daemon-8zcgx\" (UID: \"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\") " pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282555 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-var-lib-cni-bin\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282579 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-hostroot\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282600 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-run-multus-certs\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282626 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5eaf2b70-ff64-41f3-b879-0e50bdcd06ae-rootfs\") pod \"machine-config-daemon-8zcgx\" (UID: \"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\") " pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282647 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-cni-binary-copy\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282668 
4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0269081f-f135-4e66-91fd-a16277a00355-cni-binary-copy\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282690 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-cnibin\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282708 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvb85\" (UniqueName: \"kubernetes.io/projected/86246072-0dd6-41fb-878f-715a35fd98ce-kube-api-access-lvb85\") pod \"node-resolver-pxpwg\" (UID: \"86246072-0dd6-41fb-878f-715a35fd98ce\") " pod="openshift-dns/node-resolver-pxpwg" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282755 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/86246072-0dd6-41fb-878f-715a35fd98ce-hosts-file\") pod \"node-resolver-pxpwg\" (UID: \"86246072-0dd6-41fb-878f-715a35fd98ce\") " pod="openshift-dns/node-resolver-pxpwg" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282787 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282809 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-os-release\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282832 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0269081f-f135-4e66-91fd-a16277a00355-multus-daemon-config\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.282852 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22hrj\" (UniqueName: \"kubernetes.io/projected/0269081f-f135-4e66-91fd-a16277a00355-kube-api-access-22hrj\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.286327 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:50 crc kubenswrapper[4884]: E1210 00:30:50.286489 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.290790 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.306364 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.322696 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.322744 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.322758 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.322778 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.322793 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:50Z","lastTransitionTime":"2025-12-10T00:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.324285 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.339541 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.355412 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.368496 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.383825 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-cnibin\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.383880 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvb85\" (UniqueName: \"kubernetes.io/projected/86246072-0dd6-41fb-878f-715a35fd98ce-kube-api-access-lvb85\") pod \"node-resolver-pxpwg\" (UID: \"86246072-0dd6-41fb-878f-715a35fd98ce\") " pod="openshift-dns/node-resolver-pxpwg" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.383901 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/86246072-0dd6-41fb-878f-715a35fd98ce-hosts-file\") pod \"node-resolver-pxpwg\" (UID: \"86246072-0dd6-41fb-878f-715a35fd98ce\") " pod="openshift-dns/node-resolver-pxpwg" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.383926 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.383949 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-os-release\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " 
pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.383968 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0269081f-f135-4e66-91fd-a16277a00355-multus-daemon-config\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.383992 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22hrj\" (UniqueName: \"kubernetes.io/projected/0269081f-f135-4e66-91fd-a16277a00355-kube-api-access-22hrj\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384016 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-multus-conf-dir\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384047 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5eaf2b70-ff64-41f3-b879-0e50bdcd06ae-mcd-auth-proxy-config\") pod \"machine-config-daemon-8zcgx\" (UID: \"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\") " pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384068 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-run-k8s-cni-cncf-io\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384090 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-run-netns\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384117 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nqsm\" (UniqueName: \"kubernetes.io/projected/5eaf2b70-ff64-41f3-b879-0e50bdcd06ae-kube-api-access-6nqsm\") pod \"machine-config-daemon-8zcgx\" (UID: \"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\") " pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384189 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-os-release\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384208 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-multus-cni-dir\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc 
kubenswrapper[4884]: I1210 00:30:50.384231 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-multus-socket-dir-parent\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384260 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-cnibin\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384278 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rchwx\" (UniqueName: \"kubernetes.io/projected/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-kube-api-access-rchwx\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384296 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-var-lib-kubelet\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384313 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-etc-kubernetes\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384330 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-system-cni-dir\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384350 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384385 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-system-cni-dir\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384391 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-os-release\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384408 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-var-lib-cni-multus\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384433 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5eaf2b70-ff64-41f3-b879-0e50bdcd06ae-proxy-tls\") pod \"machine-config-daemon-8zcgx\" (UID: \"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\") " pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384470 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-cni-binary-copy\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384488 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0269081f-f135-4e66-91fd-a16277a00355-cni-binary-copy\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384492 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-multus-cni-dir\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.383959 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-cnibin\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384532 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-var-lib-cni-bin\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384551 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-multus-socket-dir-parent\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384590 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-cnibin\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384680 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-multus-conf-dir\") pod \"multus-rcj68\" (UID: 
\"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384769 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-var-lib-kubelet\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384797 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-etc-kubernetes\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384833 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-system-cni-dir\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384961 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/86246072-0dd6-41fb-878f-715a35fd98ce-hosts-file\") pod \"node-resolver-pxpwg\" (UID: \"86246072-0dd6-41fb-878f-715a35fd98ce\") " pod="openshift-dns/node-resolver-pxpwg" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385082 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385217 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-system-cni-dir\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.384505 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-var-lib-cni-bin\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385385 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-hostroot\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385508 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5eaf2b70-ff64-41f3-b879-0e50bdcd06ae-mcd-auth-proxy-config\") pod \"machine-config-daemon-8zcgx\" (UID: \"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\") " pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385418 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-run-k8s-cni-cncf-io\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385534 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-run-netns\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385610 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-hostroot\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385558 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-var-lib-cni-multus\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385628 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-run-multus-certs\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385693 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0269081f-f135-4e66-91fd-a16277a00355-multus-daemon-config\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385725 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-cni-binary-copy\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385438 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0269081f-f135-4e66-91fd-a16277a00355-host-run-multus-certs\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385756 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-os-release\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385797 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5eaf2b70-ff64-41f3-b879-0e50bdcd06ae-rootfs\") pod 
\"machine-config-daemon-8zcgx\" (UID: \"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\") " pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385840 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0269081f-f135-4e66-91fd-a16277a00355-cni-binary-copy\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.385908 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5eaf2b70-ff64-41f3-b879-0e50bdcd06ae-rootfs\") pod \"machine-config-daemon-8zcgx\" (UID: \"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\") " pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.386051 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.387560 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.391501 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5eaf2b70-ff64-41f3-b879-0e50bdcd06ae-proxy-tls\") pod \"machine-config-daemon-8zcgx\" (UID: \"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\") " pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.401136 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvb85\" (UniqueName: \"kubernetes.io/projected/86246072-0dd6-41fb-878f-715a35fd98ce-kube-api-access-lvb85\") pod \"node-resolver-pxpwg\" (UID: \"86246072-0dd6-41fb-878f-715a35fd98ce\") " pod="openshift-dns/node-resolver-pxpwg" Dec 10 00:30:50 crc kubenswrapper[4884]: 
I1210 00:30:50.403920 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nqsm\" (UniqueName: \"kubernetes.io/projected/5eaf2b70-ff64-41f3-b879-0e50bdcd06ae-kube-api-access-6nqsm\") pod \"machine-config-daemon-8zcgx\" (UID: \"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\") " pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.412135 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22hrj\" (UniqueName: \"kubernetes.io/projected/0269081f-f135-4e66-91fd-a16277a00355-kube-api-access-22hrj\") pod \"multus-rcj68\" (UID: \"0269081f-f135-4e66-91fd-a16277a00355\") " pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.416471 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d
4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.426285 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.426320 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.426330 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.426346 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.426358 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:50Z","lastTransitionTime":"2025-12-10T00:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.427236 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rchwx\" (UniqueName: \"kubernetes.io/projected/1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7-kube-api-access-rchwx\") pod \"multus-additional-cni-plugins-bc4r6\" (UID: \"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\") " pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.432476 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.447329 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.455366 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" 
event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b"} Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.466792 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.486295 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.500205 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.512070 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.522361 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.524635 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-pxpwg" Dec 10 00:30:50 crc kubenswrapper[4884]: W1210 00:30:50.527543 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5eaf2b70_ff64_41f3_b879_0e50bdcd06ae.slice/crio-30b726221c2615819f58ae729c7307f0fb3d12eb0746c3283593ac19f4737366 WatchSource:0}: Error finding container 30b726221c2615819f58ae729c7307f0fb3d12eb0746c3283593ac19f4737366: Status 404 returned error can't find the container with id 30b726221c2615819f58ae729c7307f0fb3d12eb0746c3283593ac19f4737366 Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.528369 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.528400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.528412 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.530727 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.530782 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:50Z","lastTransitionTime":"2025-12-10T00:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.533370 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-rcj68" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.540315 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.550603 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-pl
ugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a7
14c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.589738 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.622873 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.643694 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.643729 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.643737 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.643753 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.643765 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:50Z","lastTransitionTime":"2025-12-10T00:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.644767 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-g8w62"] Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.645565 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.665575 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.665906 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.674964 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.675139 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.674970 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.675550 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.675820 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325745
3265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.676236 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.711368 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.742642 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.751173 4884 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-12-10 00:25:49 +0000 UTC, rotation deadline is 2026-09-23 07:52:11.276131216 +0000 UTC Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.751486 4884 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 6895h21m20.524651105s for next certificate 
rotation Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.751606 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.751624 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.751631 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.751648 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.751661 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:50Z","lastTransitionTime":"2025-12-10T00:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.764748 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.789969 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-run-netns\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790028 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-systemd-units\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 
00:30:50.790058 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-node-log\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790091 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9hvx\" (UniqueName: \"kubernetes.io/projected/7022e894-7a34-4a84-8b18-e4440e11e659-kube-api-access-c9hvx\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790116 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-systemd\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790141 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7022e894-7a34-4a84-8b18-e4440e11e659-ovn-node-metrics-cert\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790164 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-ovnkube-script-lib\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790189 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-kubelet\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790215 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-etc-openvswitch\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790260 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-ovn\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790283 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-cni-bin\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790298 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-ovnkube-config\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790314 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-env-overrides\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790331 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-var-lib-openvswitch\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790350 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-cni-netd\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790368 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790396 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-openvswitch\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790413 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-run-ovn-kubernetes\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790474 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-slash\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.790490 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-log-socket\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.810698 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.826605 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.843359 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.868179 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.868215 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.868225 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.868243 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.868253 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:50Z","lastTransitionTime":"2025-12-10T00:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.875825 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.890848 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-systemd-units\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.890888 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-node-log\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.890907 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9hvx\" (UniqueName: \"kubernetes.io/projected/7022e894-7a34-4a84-8b18-e4440e11e659-kube-api-access-c9hvx\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.890925 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-systemd\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.890945 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-ovnkube-script-lib\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.890968 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7022e894-7a34-4a84-8b18-e4440e11e659-ovn-node-metrics-cert\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.890989 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-kubelet\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891007 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-etc-openvswitch\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc 
kubenswrapper[4884]: I1210 00:30:50.891024 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-ovn\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891052 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-cni-bin\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891068 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-ovnkube-config\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891082 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-env-overrides\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891077 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-systemd-units\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891126 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-kubelet\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891077 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-node-log\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891163 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-etc-openvswitch\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891141 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-var-lib-openvswitch\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891101 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-var-lib-openvswitch\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891216 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-cni-bin\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891240 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-cni-netd\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891253 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-systemd\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891269 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891341 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-openvswitch\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891365 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-run-ovn-kubernetes\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891411 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-slash\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891432 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-log-socket\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891469 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-run-netns\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891538 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-run-netns\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891196 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-ovn\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.891941 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-ovnkube-config\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.892020 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-run-ovn-kubernetes\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.892026 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-cni-netd\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.892058 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-openvswitch\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.892083 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-slash\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.892111 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-log-socket\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.892324 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-g8w62\" (UID: 
\"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.892395 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-ovnkube-script-lib\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.892671 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-env-overrides\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.897377 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7022e894-7a34-4a84-8b18-e4440e11e659-ovn-node-metrics-cert\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.897483 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.909752 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9hvx\" (UniqueName: \"kubernetes.io/projected/7022e894-7a34-4a84-8b18-e4440e11e659-kube-api-access-c9hvx\") pod \"ovnkube-node-g8w62\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") " pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.924852 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.940797 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.959146 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.970337 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.970377 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.970386 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.970403 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.970414 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:50Z","lastTransitionTime":"2025-12-10T00:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.975344 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:50 crc kubenswrapper[4884]: I1210 00:30:50.990648 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.004276 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.005029 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: W1210 00:30:51.017574 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7022e894_7a34_4a84_8b18_e4440e11e659.slice/crio-19146dd173a1f6b857d3a1ba60aaeb8b0f2aee62980cfdd2b64fa271bfec029d WatchSource:0}: Error finding container 19146dd173a1f6b857d3a1ba60aaeb8b0f2aee62980cfdd2b64fa271bfec029d: Status 404 returned error can't find the container with id 19146dd173a1f6b857d3a1ba60aaeb8b0f2aee62980cfdd2b64fa271bfec029d Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.029310 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.065094 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.073722 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.073759 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 
00:30:51.073770 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.073783 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.073793 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:51Z","lastTransitionTime":"2025-12-10T00:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.079741 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.097126 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.119164 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.135869 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.153770 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.168087 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.176404 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.176480 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.176496 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.176518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.176531 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:51Z","lastTransitionTime":"2025-12-10T00:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.183711 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.197671 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.280033 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.280086 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.280100 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.280121 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.280135 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:51Z","lastTransitionTime":"2025-12-10T00:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.286300 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.286337 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:51 crc kubenswrapper[4884]: E1210 00:30:51.286441 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:30:51 crc kubenswrapper[4884]: E1210 00:30:51.286493 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.384125 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.384597 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.384607 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.384648 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.384661 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:51Z","lastTransitionTime":"2025-12-10T00:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.460249 4884 generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa" exitCode=0 Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.460345 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.460432 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"19146dd173a1f6b857d3a1ba60aaeb8b0f2aee62980cfdd2b64fa271bfec029d"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.463139 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.463185 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.463199 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" 
event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"30b726221c2615819f58ae729c7307f0fb3d12eb0746c3283593ac19f4737366"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.464665 4884 generic.go:334] "Generic (PLEG): container finished" podID="1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7" containerID="2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790" exitCode=0 Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.464741 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" event={"ID":"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7","Type":"ContainerDied","Data":"2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.464761 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" event={"ID":"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7","Type":"ContainerStarted","Data":"eab35f2abff65765b666b850bb2ca4aff8ffd8a3c635f2beecafe5480fa24f84"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.466273 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rcj68" event={"ID":"0269081f-f135-4e66-91fd-a16277a00355","Type":"ContainerStarted","Data":"bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.466300 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rcj68" event={"ID":"0269081f-f135-4e66-91fd-a16277a00355","Type":"ContainerStarted","Data":"76b6dab674192ed52a698389267a820dc08ea2e9dce47cb007712ddd68060848"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.468862 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-pxpwg" event={"ID":"86246072-0dd6-41fb-878f-715a35fd98ce","Type":"ContainerStarted","Data":"faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.468916 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-pxpwg" event={"ID":"86246072-0dd6-41fb-878f-715a35fd98ce","Type":"ContainerStarted","Data":"071eade36e9cafde33f0814a6b597d39fe26d722344347534a3cb127effaaf93"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.481485 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.488723 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.488790 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.488803 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.488827 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.488841 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:51Z","lastTransitionTime":"2025-12-10T00:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.498351 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: 
I1210 00:30:51.516119 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.530787 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.545733 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.561368 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.577167 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.589529 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.592315 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.592370 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.592689 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.592729 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.592741 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:51Z","lastTransitionTime":"2025-12-10T00:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.605604 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.620291 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.631110 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.646416 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.664321 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"
ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"c
ri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.677672 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.687995 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.695730 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.695760 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.695769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.695784 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.695793 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:51Z","lastTransitionTime":"2025-12-10T00:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.700392 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.712913 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.725148 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.741513 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.760281 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.777518 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.793834 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.797957 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.797985 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.797995 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.798014 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.798028 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:51Z","lastTransitionTime":"2025-12-10T00:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.810694 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc 
kubenswrapper[4884]: I1210 00:30:51.832134 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"Po
dInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.853014 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.868846 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.902232 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.902305 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.902321 4884 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.902342 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:51 crc kubenswrapper[4884]: I1210 00:30:51.902354 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:51Z","lastTransitionTime":"2025-12-10T00:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.006143 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.006838 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.006849 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.006871 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.006885 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:52Z","lastTransitionTime":"2025-12-10T00:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.110234 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.110281 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.110293 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.110314 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.110326 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:52Z","lastTransitionTime":"2025-12-10T00:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.213067 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.213105 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.213115 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.213132 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.213143 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:52Z","lastTransitionTime":"2025-12-10T00:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.229822 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-4dtwx"] Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.230330 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-4dtwx" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.233284 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.233408 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.233577 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.233673 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.256359 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.273567 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.286109 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.286489 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.286613 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.307020 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ae121743-2040-4f84-8bb7-4a04cff7bd31-serviceca\") pod \"node-ca-4dtwx\" (UID: \"ae121743-2040-4f84-8bb7-4a04cff7bd31\") " pod="openshift-image-registry/node-ca-4dtwx" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.307280 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zlgc\" (UniqueName: \"kubernetes.io/projected/ae121743-2040-4f84-8bb7-4a04cff7bd31-kube-api-access-7zlgc\") pod \"node-ca-4dtwx\" (UID: \"ae121743-2040-4f84-8bb7-4a04cff7bd31\") " pod="openshift-image-registry/node-ca-4dtwx" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.307418 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ae121743-2040-4f84-8bb7-4a04cff7bd31-host\") pod \"node-ca-4dtwx\" (UID: \"ae121743-2040-4f84-8bb7-4a04cff7bd31\") " pod="openshift-image-registry/node-ca-4dtwx" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.313068 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc 
kubenswrapper[4884]: I1210 00:30:52.316167 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.316200 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.316209 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.316227 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.316237 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:52Z","lastTransitionTime":"2025-12-10T00:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.345873 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z 
is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.363551 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.379417 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.397897 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.408058 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zlgc\" (UniqueName: \"kubernetes.io/projected/ae121743-2040-4f84-8bb7-4a04cff7bd31-kube-api-access-7zlgc\") pod \"node-ca-4dtwx\" (UID: \"ae121743-2040-4f84-8bb7-4a04cff7bd31\") " pod="openshift-image-registry/node-ca-4dtwx" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.408145 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ae121743-2040-4f84-8bb7-4a04cff7bd31-host\") pod \"node-ca-4dtwx\" (UID: \"ae121743-2040-4f84-8bb7-4a04cff7bd31\") " pod="openshift-image-registry/node-ca-4dtwx" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.408192 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/ae121743-2040-4f84-8bb7-4a04cff7bd31-serviceca\") pod \"node-ca-4dtwx\" (UID: \"ae121743-2040-4f84-8bb7-4a04cff7bd31\") " pod="openshift-image-registry/node-ca-4dtwx" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.408603 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/ae121743-2040-4f84-8bb7-4a04cff7bd31-host\") pod \"node-ca-4dtwx\" (UID: \"ae121743-2040-4f84-8bb7-4a04cff7bd31\") " pod="openshift-image-registry/node-ca-4dtwx" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.409379 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/ae121743-2040-4f84-8bb7-4a04cff7bd31-serviceca\") pod \"node-ca-4dtwx\" (UID: \"ae121743-2040-4f84-8bb7-4a04cff7bd31\") " pod="openshift-image-registry/node-ca-4dtwx" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.411884 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.418389 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.418671 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.418748 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.418849 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.418917 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:52Z","lastTransitionTime":"2025-12-10T00:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.428720 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.435313 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zlgc\" (UniqueName: \"kubernetes.io/projected/ae121743-2040-4f84-8bb7-4a04cff7bd31-kube-api-access-7zlgc\") pod \"node-ca-4dtwx\" (UID: \"ae121743-2040-4f84-8bb7-4a04cff7bd31\") " pod="openshift-image-registry/node-ca-4dtwx" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.446205 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"
name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.462360 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.476770 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.476829 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.476839 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.476851 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.476859 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.476869 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.479222 4884 generic.go:334] "Generic (PLEG): container finished" podID="1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7" 
containerID="d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006" exitCode=0 Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.479271 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" event={"ID":"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7","Type":"ContainerDied","Data":"d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.483323 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.499909 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.516632 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-
10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.528625 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.528678 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.528706 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.528730 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.528743 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:52Z","lastTransitionTime":"2025-12-10T00:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.545148 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-4dtwx" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.551819 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z 
is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: W1210 00:30:52.569639 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae121743_2040_4f84_8bb7_4a04cff7bd31.slice/crio-74e73af6f14060790a118a043a85d519e74eed918201e11128e8ed253d4c7177 WatchSource:0}: Error finding container 74e73af6f14060790a118a043a85d519e74eed918201e11128e8ed253d4c7177: Status 404 returned error can't find the container with id 74e73af6f14060790a118a043a85d519e74eed918201e11128e8ed253d4c7177 Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.570827 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"r
esource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 
00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.587107 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.604915 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.622505 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.631874 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.631985 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.632046 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.632105 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.632158 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:52Z","lastTransitionTime":"2025-12-10T00:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.643821 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.660191 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.678481 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.696763 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.726089 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.736230 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.736279 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.736293 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.736312 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.736326 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:52Z","lastTransitionTime":"2025-12-10T00:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.770876 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.805616 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.812039 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.812241 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 
00:30:52.812297 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.812487 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.812558 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:31:00.812537062 +0000 UTC m=+33.890494199 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.812629 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:31:00.812620024 +0000 UTC m=+33.890577161 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.812717 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.812760 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:31:00.812751308 +0000 UTC m=+33.890708435 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.842987 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.843035 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.843049 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.843066 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.843080 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:52Z","lastTransitionTime":"2025-12-10T00:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.853621 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.913603 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.913662 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.913819 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.913843 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.913858 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.913915 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 00:31:00.913897351 +0000 UTC m=+33.991854478 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.914308 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.914357 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.914374 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:52 crc kubenswrapper[4884]: E1210 00:30:52.914475 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 00:31:00.914453207 +0000 UTC m=+33.992410314 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.947037 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.947084 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.947095 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.947118 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:52 crc kubenswrapper[4884]: I1210 00:30:52.947132 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:52Z","lastTransitionTime":"2025-12-10T00:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.050326 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.050398 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.050416 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.050439 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.050470 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:53Z","lastTransitionTime":"2025-12-10T00:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.153405 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.153464 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.153480 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.153497 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.153510 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:53Z","lastTransitionTime":"2025-12-10T00:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.256729 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.256821 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.256848 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.256890 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.256920 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:53Z","lastTransitionTime":"2025-12-10T00:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.286544 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.286595 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:53 crc kubenswrapper[4884]: E1210 00:30:53.286799 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:30:53 crc kubenswrapper[4884]: E1210 00:30:53.286930 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.360629 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.360678 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.360692 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.360713 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.360728 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:53Z","lastTransitionTime":"2025-12-10T00:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.464856 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.464915 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.464934 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.464958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.464971 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:53Z","lastTransitionTime":"2025-12-10T00:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.487124 4884 generic.go:334] "Generic (PLEG): container finished" podID="1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7" containerID="89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10" exitCode=0 Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.487241 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" event={"ID":"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7","Type":"ContainerDied","Data":"89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.489786 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-4dtwx" event={"ID":"ae121743-2040-4f84-8bb7-4a04cff7bd31","Type":"ContainerStarted","Data":"c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.489861 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-4dtwx" event={"ID":"ae121743-2040-4f84-8bb7-4a04cff7bd31","Type":"ContainerStarted","Data":"74e73af6f14060790a118a043a85d519e74eed918201e11128e8ed253d4c7177"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.509489 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.530580 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.546969 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.563056 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.567911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.567986 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.568006 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.568035 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.568058 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:53Z","lastTransitionTime":"2025-12-10T00:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.578527 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.594249 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.614492 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.630189 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.650573 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.666553 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.671522 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.671577 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.671594 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.671620 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.671639 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:53Z","lastTransitionTime":"2025-12-10T00:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.684138 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mount
Path\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readO
nly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.703002 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.717614 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.730082 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.744396 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.761675 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.774641 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.774705 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.774718 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.774751 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.774764 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:53Z","lastTransitionTime":"2025-12-10T00:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.778204 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.800948 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.816960 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.833925 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.853629 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z 
is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.878000 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.878052 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.878063 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.878084 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.878099 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:53Z","lastTransitionTime":"2025-12-10T00:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.878585 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.894982 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.921065 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.947900 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.969537 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.981125 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.981174 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.981183 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.981204 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.981219 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:53Z","lastTransitionTime":"2025-12-10T00:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:53 crc kubenswrapper[4884]: I1210 00:30:53.990733 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.013734 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.084996 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.085058 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.085070 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.085092 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.085107 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:54Z","lastTransitionTime":"2025-12-10T00:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.187286 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.187356 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.187376 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.187402 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.187421 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:54Z","lastTransitionTime":"2025-12-10T00:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.286113 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:54 crc kubenswrapper[4884]: E1210 00:30:54.286301 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.290591 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.290649 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.290660 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.290682 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.290696 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:54Z","lastTransitionTime":"2025-12-10T00:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.393326 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.393375 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.393388 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.393407 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.393423 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:54Z","lastTransitionTime":"2025-12-10T00:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.498789 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.498834 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.498848 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.498871 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.498885 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:54Z","lastTransitionTime":"2025-12-10T00:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.503269 4884 generic.go:334] "Generic (PLEG): container finished" podID="1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7" containerID="0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072" exitCode=0 Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.503340 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" event={"ID":"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7","Type":"ContainerDied","Data":"0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072"} Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.521253 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.549509 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.575338 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z 
is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.593150 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.606065 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.606108 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.606118 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.606135 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.606146 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:54Z","lastTransitionTime":"2025-12-10T00:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.611755 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.626868 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.638494 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.652472 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.670213 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.688032 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.703216 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.715151 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.715200 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.715213 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.715234 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.715253 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:54Z","lastTransitionTime":"2025-12-10T00:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.721137 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.738811 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.754714 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.821114 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.821175 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.821183 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.821198 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.821207 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:54Z","lastTransitionTime":"2025-12-10T00:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.924618 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.924674 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.924687 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.924708 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:54 crc kubenswrapper[4884]: I1210 00:30:54.924721 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:54Z","lastTransitionTime":"2025-12-10T00:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.029471 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.029529 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.029543 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.029568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.029582 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:55Z","lastTransitionTime":"2025-12-10T00:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.133405 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.133523 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.133551 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.133586 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.133748 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:55Z","lastTransitionTime":"2025-12-10T00:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.236680 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.236744 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.236765 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.236793 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.236816 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:55Z","lastTransitionTime":"2025-12-10T00:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.286186 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:55 crc kubenswrapper[4884]: E1210 00:30:55.286512 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.287589 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:55 crc kubenswrapper[4884]: E1210 00:30:55.287768 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.340614 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.340687 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.340707 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.340735 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.340754 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:55Z","lastTransitionTime":"2025-12-10T00:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.444230 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.444279 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.444292 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.444313 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.444327 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:55Z","lastTransitionTime":"2025-12-10T00:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.514052 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232"} Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.518394 4884 generic.go:334] "Generic (PLEG): container finished" podID="1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7" containerID="1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34" exitCode=0 Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.518483 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" event={"ID":"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7","Type":"ContainerDied","Data":"1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34"} Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.534490 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.548155 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.548248 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.548272 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.548306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.548332 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:55Z","lastTransitionTime":"2025-12-10T00:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.559176 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.590159 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z 
is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.607608 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.625688 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.639899 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.652355 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.654411 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.654492 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.654507 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.654530 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.654542 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:55Z","lastTransitionTime":"2025-12-10T00:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.667742 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.682757 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.695681 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.711475 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.725060 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.737809 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.751174 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\
\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:55Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.757264 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.757325 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.757359 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.757384 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.757400 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:55Z","lastTransitionTime":"2025-12-10T00:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.863889 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.863945 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.863960 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.863980 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.863994 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:55Z","lastTransitionTime":"2025-12-10T00:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.966220 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.966682 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.966691 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.966707 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:55 crc kubenswrapper[4884]: I1210 00:30:55.966751 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:55Z","lastTransitionTime":"2025-12-10T00:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.069810 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.069865 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.069886 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.069949 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.070019 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:56Z","lastTransitionTime":"2025-12-10T00:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.173420 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.173542 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.173560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.173586 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.173605 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:56Z","lastTransitionTime":"2025-12-10T00:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.276349 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.276394 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.276406 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.276424 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.276456 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:56Z","lastTransitionTime":"2025-12-10T00:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.286879 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:56 crc kubenswrapper[4884]: E1210 00:30:56.286999 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.379282 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.379335 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.379347 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.379366 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.379379 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:56Z","lastTransitionTime":"2025-12-10T00:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.483579 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.483629 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.483642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.483663 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.483673 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:56Z","lastTransitionTime":"2025-12-10T00:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.526581 4884 generic.go:334] "Generic (PLEG): container finished" podID="1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7" containerID="e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad" exitCode=0 Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.526638 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" event={"ID":"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7","Type":"ContainerDied","Data":"e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad"} Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.554377 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a57
8bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.571622 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.584907 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.587269 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.587312 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.587325 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.587345 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.587355 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:56Z","lastTransitionTime":"2025-12-10T00:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.599987 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.614072 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.627865 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.639173 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.658695 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://195
1199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.681131 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.690255 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.690301 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.690321 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.690350 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.690371 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:56Z","lastTransitionTime":"2025-12-10T00:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.699192 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.715918 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\
\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.730638 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.745977 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.761099 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:56Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.793175 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.793228 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.793242 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.793265 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.793282 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:56Z","lastTransitionTime":"2025-12-10T00:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.895986 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.896053 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.896078 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.896109 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.896131 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:56Z","lastTransitionTime":"2025-12-10T00:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:56 crc kubenswrapper[4884]: I1210 00:30:56.999827 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:56.999938 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:56.999965 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.000398 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.000947 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:57Z","lastTransitionTime":"2025-12-10T00:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.065966 4884 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.103464 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.104173 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.104236 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.104251 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.104274 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.104288 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:57Z","lastTransitionTime":"2025-12-10T00:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.125640 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.150646 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.174471 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.195569 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.208189 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.208268 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.208288 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.208320 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.208343 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:57Z","lastTransitionTime":"2025-12-10T00:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.215205 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.232822 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.258740 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.273910 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.287059 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.287150 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:57 crc kubenswrapper[4884]: E1210 00:30:57.287215 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:30:57 crc kubenswrapper[4884]: E1210 00:30:57.287315 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.297391 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.310932 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.310964 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.310974 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.310991 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.311024 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:57Z","lastTransitionTime":"2025-12-10T00:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.316283 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab1
1192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.332035 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6
198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure 
cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.353678 4884 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.369180 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.385821 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.402210 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.415055 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.415101 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.415117 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.415137 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.415151 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:57Z","lastTransitionTime":"2025-12-10T00:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.418136 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.432068 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.448736 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.462044 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.476052 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.493193 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.514070 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeove
rride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.518879 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.518935 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.518953 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.518980 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.519003 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:57Z","lastTransitionTime":"2025-12-10T00:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.541007 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"889ff6ec9e7ffcf38ff2eee2868dc82ecf56b6d47320d6d012e22ac8593407d7"} Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.541403 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.547230 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z 
is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.547412 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" event={"ID":"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7","Type":"ContainerStarted","Data":"a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284"} Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.563563 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kub
e-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.623005 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.624675 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.625195 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.625247 4884 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.625279 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.625308 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.625331 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:57Z","lastTransitionTime":"2025-12-10T00:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.646813 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":tr
ue,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.662753 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.683342 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.701410 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.717381 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.728286 4884 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.728324 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.728336 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.728354 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.728366 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:57Z","lastTransitionTime":"2025-12-10T00:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.731549 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.745913 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.762485 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.775477 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.793436 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.811742 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.828252 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\
\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.832242 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.832315 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.832339 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.832375 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.832400 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:57Z","lastTransitionTime":"2025-12-10T00:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.848949 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.863896 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.885682 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.910307 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"im
ageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://889ff6ec9e7ffcf38ff2eee2868dc82ecf56b6d47320d6d012e22ac8593407d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.930248 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.935947 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.936007 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.936021 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.936046 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:57 crc kubenswrapper[4884]: I1210 00:30:57.936064 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:57Z","lastTransitionTime":"2025-12-10T00:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.039188 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.039242 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.039257 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.039279 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.039295 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.142057 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.142585 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.142722 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.142856 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.143012 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.246944 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.247040 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.247058 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.247086 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.247104 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.286543 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:30:58 crc kubenswrapper[4884]: E1210 00:30:58.287017 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.349842 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.349904 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.349917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.349939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.349958 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.453762 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.453829 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.453842 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.453872 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.453890 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.551746 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.552296 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.559249 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.559333 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.559349 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.559373 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.559394 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.582116 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.602138 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.619564 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.636108 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.657223 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.663964 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.664378 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.664869 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.665267 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.666410 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.680083 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.699878 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.720084 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.744288 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.764513 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.770584 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.770654 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.770675 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.770706 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.770724 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.786331 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.796288 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.796503 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.796609 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.796706 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.796796 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.807798 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: E1210 00:30:58.811860 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.816177 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.816321 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.816464 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.816585 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.816688 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.822948 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc 
kubenswrapper[4884]: E1210 00:30:58.840290 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider 
started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d
34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.842665 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.846666 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.846787 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc 
kubenswrapper[4884]: I1210 00:30:58.846823 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.846861 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.846887 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: E1210 00:30:58.870312 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.876018 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://889ff6ec9e7ffcf38ff2eee2868dc82ecf56b6d47320d6d012e22ac8593407d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.877379 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.877472 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.877490 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.877521 4884 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.877538 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: E1210 00:30:58.896169 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.903691 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.903985 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.904119 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.904219 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.904326 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:58 crc kubenswrapper[4884]: E1210 00:30:58.927588 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:30:58Z is after 2025-08-24T17:21:41Z" Dec 10 00:30:58 crc kubenswrapper[4884]: E1210 00:30:58.927826 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.930276 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.930419 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.930524 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.930602 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:58 crc kubenswrapper[4884]: I1210 00:30:58.930663 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:58Z","lastTransitionTime":"2025-12-10T00:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.033160 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.033220 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.033233 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.033256 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.033272 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:59Z","lastTransitionTime":"2025-12-10T00:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.136329 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.136362 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.136372 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.136387 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.136397 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:59Z","lastTransitionTime":"2025-12-10T00:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.238988 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.239030 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.239041 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.239057 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.239067 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:59Z","lastTransitionTime":"2025-12-10T00:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.286214 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.286277 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:30:59 crc kubenswrapper[4884]: E1210 00:30:59.286388 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:30:59 crc kubenswrapper[4884]: E1210 00:30:59.286602 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.342812 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.342869 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.342915 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.342937 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.342955 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:59Z","lastTransitionTime":"2025-12-10T00:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.446607 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.446675 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.446698 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.446728 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.446747 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:59Z","lastTransitionTime":"2025-12-10T00:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.550946 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.550994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.551014 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.551040 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.551057 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:59Z","lastTransitionTime":"2025-12-10T00:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.555142 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.654429 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.654540 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.654569 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.654604 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.654624 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:59Z","lastTransitionTime":"2025-12-10T00:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.757348 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.757416 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.757468 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.757502 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.757521 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:59Z","lastTransitionTime":"2025-12-10T00:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.860069 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.860137 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.860160 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.860190 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.860211 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:59Z","lastTransitionTime":"2025-12-10T00:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.963596 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.963653 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.963670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.963696 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:30:59 crc kubenswrapper[4884]: I1210 00:30:59.963714 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:30:59Z","lastTransitionTime":"2025-12-10T00:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.067026 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.067081 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.067100 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.067134 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.067156 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:00Z","lastTransitionTime":"2025-12-10T00:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.170319 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.170362 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.170370 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.170387 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.170396 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:00Z","lastTransitionTime":"2025-12-10T00:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.273512 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.273585 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.273604 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.273638 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.273659 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:00Z","lastTransitionTime":"2025-12-10T00:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.286933 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 10 00:31:00 crc kubenswrapper[4884]: E1210 00:31:00.287113 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.376131 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.376191 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.376204 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.376226 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.376240 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:00Z","lastTransitionTime":"2025-12-10T00:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.479639 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.479697 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.479708 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.479726 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.479738 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:00Z","lastTransitionTime":"2025-12-10T00:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.564809 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/0.log"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.569042 4884 generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="889ff6ec9e7ffcf38ff2eee2868dc82ecf56b6d47320d6d012e22ac8593407d7" exitCode=1
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.569100 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"889ff6ec9e7ffcf38ff2eee2868dc82ecf56b6d47320d6d012e22ac8593407d7"}
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.570375 4884 scope.go:117] "RemoveContainer" containerID="889ff6ec9e7ffcf38ff2eee2868dc82ecf56b6d47320d6d012e22ac8593407d7"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.582014 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.582072 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.582090 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.582114 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.582132 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:00Z","lastTransitionTime":"2025-12-10T00:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.594195 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.609756 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.634544 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.659616 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://889ff6ec9e7ffcf38ff2eee2868dc82ecf56b6d47320d6d012e22ac8593407d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://889ff6ec9e7ffcf38ff2eee2868dc82ecf56b6d47320d6d012e22ac8593407d7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:00Z\\\",\\\"message\\\":\\\" 00:30:59.712364 6207 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1210 00:30:59.712621 6207 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1210 00:30:59.712682 6207 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1210 00:30:59.712700 6207 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.712933 6207 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1210 00:30:59.712980 6207 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1210 00:30:59.713112 6207 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.713377 6207 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.713874 6207 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 00:30:59.713989 6207 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.714827 6207 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.678244 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster
-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.685958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:00 crc 
kubenswrapper[4884]: I1210 00:31:00.686026 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.686047 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.686077 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.686096 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:00Z","lastTransitionTime":"2025-12-10T00:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.694164 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.708415 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.721993 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.740304 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.756028 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.773004 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.789121 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.790379 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.790504 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.790522 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.790568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.790584 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:00Z","lastTransitionTime":"2025-12-10T00:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.805418 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.822049 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.893584 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.893638 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.893658 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.893686 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.893705 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:00Z","lastTransitionTime":"2025-12-10T00:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.909471 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.909617 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:00 crc kubenswrapper[4884]: E1210 00:31:00.909697 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:31:16.909659604 +0000 UTC m=+49.987616721 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:31:00 crc kubenswrapper[4884]: E1210 00:31:00.909756 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.909776 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:00 crc kubenswrapper[4884]: E1210 00:31:00.909858 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:31:16.90982677 +0000 UTC m=+49.987783917 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:31:00 crc kubenswrapper[4884]: E1210 00:31:00.909948 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:31:00 crc kubenswrapper[4884]: E1210 00:31:00.910022 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:31:16.910005025 +0000 UTC m=+49.987962152 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.996929 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.996992 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.997010 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.997049 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:00 crc kubenswrapper[4884]: I1210 00:31:00.997071 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:00Z","lastTransitionTime":"2025-12-10T00:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.010880 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.010937 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:01 crc kubenswrapper[4884]: E1210 00:31:01.011116 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:31:01 crc kubenswrapper[4884]: E1210 00:31:01.011143 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:31:01 crc kubenswrapper[4884]: E1210 00:31:01.011175 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:31:01 crc kubenswrapper[4884]: E1210 00:31:01.011230 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:31:01 crc kubenswrapper[4884]: E1210 00:31:01.011152 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:31:01 crc kubenswrapper[4884]: E1210 00:31:01.011255 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:31:01 crc kubenswrapper[4884]: E1210 00:31:01.011358 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 00:31:17.011290072 +0000 UTC m=+50.089247219 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:31:01 crc kubenswrapper[4884]: E1210 00:31:01.011390 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 00:31:17.011376534 +0000 UTC m=+50.089333681 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.100736 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.100831 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.100859 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.100897 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.100928 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:01Z","lastTransitionTime":"2025-12-10T00:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.204471 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.204553 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.204570 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.204595 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.204611 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:01Z","lastTransitionTime":"2025-12-10T00:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.286907 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.287072 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:01 crc kubenswrapper[4884]: E1210 00:31:01.287169 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:01 crc kubenswrapper[4884]: E1210 00:31:01.287339 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.307158 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.307218 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.307238 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.307266 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.307281 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:01Z","lastTransitionTime":"2025-12-10T00:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.410031 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.410109 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.410120 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.410153 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.410170 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:01Z","lastTransitionTime":"2025-12-10T00:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.512598 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.512661 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.512673 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.512696 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.512710 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:01Z","lastTransitionTime":"2025-12-10T00:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.582493 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/0.log" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.590258 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328"} Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.590341 4884 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.606992 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b
159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.615569 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.615595 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.615606 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.615624 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.615635 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:01Z","lastTransitionTime":"2025-12-10T00:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.623473 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.637970 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.664398 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.685720 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://889ff6ec9e7ffcf38ff2eee2868dc82ecf56b6d47320d6d012e22ac8593407d7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:00Z\\\",\\\"message\\\":\\\" 00:30:59.712364 6207 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1210 00:30:59.712621 6207 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1210 00:30:59.712682 6207 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1210 00:30:59.712700 6207 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.712933 6207 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1210 00:30:59.712980 6207 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1210 00:30:59.713112 6207 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.713377 6207 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.713874 6207 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 00:30:59.713989 6207 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.714827 6207 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.700210 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.716151 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.718784 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.718830 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.718840 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.718858 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.718870 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:01Z","lastTransitionTime":"2025-12-10T00:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.731853 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.749138 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.765010 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.783101 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.800461 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.819469 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.821583 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.821621 4884 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.821650 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.821668 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.821678 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:01Z","lastTransitionTime":"2025-12-10T00:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.839736 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:01Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.924860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.924934 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.924959 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.924993 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:01 crc kubenswrapper[4884]: I1210 00:31:01.925015 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:01Z","lastTransitionTime":"2025-12-10T00:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.028559 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.028632 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.028652 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.028678 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.028696 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:02Z","lastTransitionTime":"2025-12-10T00:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.132240 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.132299 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.132316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.132337 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.132352 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:02Z","lastTransitionTime":"2025-12-10T00:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.235560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.235633 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.235657 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.235699 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.235724 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:02Z","lastTransitionTime":"2025-12-10T00:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.286248 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:02 crc kubenswrapper[4884]: E1210 00:31:02.286544 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.338933 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.339014 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.339031 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.339059 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.339078 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:02Z","lastTransitionTime":"2025-12-10T00:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.442041 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.442110 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.442126 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.442150 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.442166 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:02Z","lastTransitionTime":"2025-12-10T00:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.545619 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.545676 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.545692 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.545719 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.545738 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:02Z","lastTransitionTime":"2025-12-10T00:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.598600 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/1.log" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.600128 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/0.log" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.605253 4884 generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328" exitCode=1 Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.605339 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328"} Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.605469 4884 scope.go:117] "RemoveContainer" containerID="889ff6ec9e7ffcf38ff2eee2868dc82ecf56b6d47320d6d012e22ac8593407d7" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.606402 4884 scope.go:117] "RemoveContainer" containerID="fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328" Dec 10 00:31:02 crc kubenswrapper[4884]: E1210 00:31:02.606781 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.628897 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.648888 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.648936 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.648952 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.648977 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.648996 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:02Z","lastTransitionTime":"2025-12-10T00:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.650866 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.672412 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.691677 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.707869 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.730740 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.752294 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.752340 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.752353 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.752369 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.752379 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:02Z","lastTransitionTime":"2025-12-10T00:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.757086 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.774426 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.797622 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.824295 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://889ff6ec9e7ffcf38ff2eee2868dc82ecf56b6d47320d6d012e22ac8593407d7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:00Z\\\",\\\"message\\\":\\\" 00:30:59.712364 6207 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1210 00:30:59.712621 6207 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1210 00:30:59.712682 6207 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1210 00:30:59.712700 6207 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.712933 6207 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1210 00:30:59.712980 6207 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1210 00:30:59.713112 6207 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.713377 6207 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.713874 6207 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 00:30:59.713989 6207 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.714827 6207 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: 
[]services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.849922 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.855226 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.855269 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.855283 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.855306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.855326 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:02Z","lastTransitionTime":"2025-12-10T00:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.873062 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.890557 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.906695 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:02Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.958728 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.958793 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.958815 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.958876 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:02 crc kubenswrapper[4884]: I1210 00:31:02.958896 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:02Z","lastTransitionTime":"2025-12-10T00:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.061889 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.061950 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.061968 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.061996 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.062013 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:03Z","lastTransitionTime":"2025-12-10T00:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.165725 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.165798 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.165818 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.165849 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.165871 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:03Z","lastTransitionTime":"2025-12-10T00:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.269600 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.269675 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.269700 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.269732 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.269752 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:03Z","lastTransitionTime":"2025-12-10T00:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.286267 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.286343 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:03 crc kubenswrapper[4884]: E1210 00:31:03.286549 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:03 crc kubenswrapper[4884]: E1210 00:31:03.286872 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.363166 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p"] Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.364079 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.369428 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.369516 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.373261 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.373466 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.373726 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.373951 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.374061 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:03Z","lastTransitionTime":"2025-12-10T00:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.389139 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.411699 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.429115 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.438695 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd-env-overrides\") pod \"ovnkube-control-plane-749d76644c-bwm7p\" (UID: \"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.438760 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-bwm7p\" (UID: \"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.438874 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-dpcw8\" (UniqueName: \"kubernetes.io/projected/3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd-kube-api-access-dpcw8\") pod \"ovnkube-control-plane-749d76644c-bwm7p\" (UID: \"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.438921 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-bwm7p\" (UID: \"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.456326 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-
10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e
79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\
"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.517696 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.517752 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.517762 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.517784 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.517805 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:03Z","lastTransitionTime":"2025-12-10T00:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.520930 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4
773285774c68e6d1a9713328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://889ff6ec9e7ffcf38ff2eee2868dc82ecf56b6d47320d6d012e22ac8593407d7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:00Z\\\",\\\"message\\\":\\\" 00:30:59.712364 6207 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1210 00:30:59.712621 6207 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1210 00:30:59.712682 6207 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1210 00:30:59.712700 6207 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.712933 6207 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1210 00:30:59.712980 6207 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1210 00:30:59.713112 6207 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.713377 6207 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.713874 6207 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1210 00:30:59.713989 6207 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:30:59.714827 6207 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod 
openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses
\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.535980 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.539906 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-bwm7p\" (UID: \"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.539955 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd-env-overrides\") pod \"ovnkube-control-plane-749d76644c-bwm7p\" (UID: \"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.539983 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-bwm7p\" (UID: \"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.540054 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpcw8\" (UniqueName: \"kubernetes.io/projected/3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd-kube-api-access-dpcw8\") pod \"ovnkube-control-plane-749d76644c-bwm7p\" (UID: \"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.541086 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd-env-overrides\") pod \"ovnkube-control-plane-749d76644c-bwm7p\" (UID: \"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.542685 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-bwm7p\" (UID: \"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.555073 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-bwm7p\" (UID: \"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.556682 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.565769 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpcw8\" (UniqueName: \"kubernetes.io/projected/3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd-kube-api-access-dpcw8\") pod \"ovnkube-control-plane-749d76644c-bwm7p\" (UID: \"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.579587 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.598974 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.612193 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/1.log" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.621297 4884 scope.go:117] "RemoveContainer" containerID="fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.621592 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: E1210 00:31:03.622463 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.624918 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.624975 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.624989 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.625012 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.625029 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:03Z","lastTransitionTime":"2025-12-10T00:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.642974 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.661691 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.678014 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.684043 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.695714 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.712498 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.727871 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:03 crc 
kubenswrapper[4884]: I1210 00:31:03.727925 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.727939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.727963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.727976 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:03Z","lastTransitionTime":"2025-12-10T00:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.732645 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.751206 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.773864 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.791699 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.808826 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.822578 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.832330 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.832390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.832407 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.832450 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.832465 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:03Z","lastTransitionTime":"2025-12-10T00:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.844381 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.863945 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.878158 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.896151 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.909765 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.933567 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mou
ntPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCou
nt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.934946 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.935003 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.935022 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.935050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.935067 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:03Z","lastTransitionTime":"2025-12-10T00:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.962020 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4
773285774c68e6d1a9713328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:03 crc kubenswrapper[4884]: I1210 00:31:03.987281 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:03Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.007936 4884 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.038407 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.038475 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:04 crc 
kubenswrapper[4884]: I1210 00:31:04.038486 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.038504 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.038517 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:04Z","lastTransitionTime":"2025-12-10T00:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.142806 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.142936 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.142955 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.142982 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.143002 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:04Z","lastTransitionTime":"2025-12-10T00:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.247605 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.247672 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.247692 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.247720 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.247742 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:04Z","lastTransitionTime":"2025-12-10T00:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.287082 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:04 crc kubenswrapper[4884]: E1210 00:31:04.287309 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.350941 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.351465 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.351490 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.351522 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.351542 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:04Z","lastTransitionTime":"2025-12-10T00:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.454782 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.454840 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.454857 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.454882 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.454900 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:04Z","lastTransitionTime":"2025-12-10T00:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.558224 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.558273 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.558296 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.558323 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.558338 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:04Z","lastTransitionTime":"2025-12-10T00:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.628350 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" event={"ID":"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd","Type":"ContainerStarted","Data":"3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691"} Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.628541 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" event={"ID":"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd","Type":"ContainerStarted","Data":"22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b"} Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.628585 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" event={"ID":"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd","Type":"ContainerStarted","Data":"5df1fc3ad26aeb45a3a8de0e9887d083345a73f6e6ae7c0951fcf75cdaa7203e"} Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.652357 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.661371 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.661524 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.661549 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.661576 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.661605 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:04Z","lastTransitionTime":"2025-12-10T00:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.668270 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.686171 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.708097 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.725036 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.743892 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.761265 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.764337 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.764373 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.764390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.764408 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.764422 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:04Z","lastTransitionTime":"2025-12-10T00:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.782603 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.800602 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.820155 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.836175 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.849145 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.867560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.867652 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.867671 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.867702 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.867723 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:04Z","lastTransitionTime":"2025-12-10T00:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.869248 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4
cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.900305 4884 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32
fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.926990 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:04Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.970298 4884 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.970351 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.970367 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.970390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:04 crc kubenswrapper[4884]: I1210 00:31:04.970408 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:04Z","lastTransitionTime":"2025-12-10T00:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.073781 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.073837 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.073849 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.073873 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.073889 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:05Z","lastTransitionTime":"2025-12-10T00:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.177380 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.177484 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.177505 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.177533 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.177553 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:05Z","lastTransitionTime":"2025-12-10T00:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.280429 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.280518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.280531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.280553 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.280568 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:05Z","lastTransitionTime":"2025-12-10T00:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.286847 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:05 crc kubenswrapper[4884]: E1210 00:31:05.287026 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.286858 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:05 crc kubenswrapper[4884]: E1210 00:31:05.287156 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.335157 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-ndwnl"] Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.335978 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:05 crc kubenswrapper[4884]: E1210 00:31:05.336074 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.358661 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.382800 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.384090 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.384162 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.384183 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.384211 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.384233 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:05Z","lastTransitionTime":"2025-12-10T00:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.405129 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.424418 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.442759 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8
b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.460140 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.463173 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.463228 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwpg9\" (UniqueName: \"kubernetes.io/projected/ec324800-e820-40c0-8b51-b020075f09eb-kube-api-access-dwpg9\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.485865 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.487791 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.487853 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.487872 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.487897 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.487913 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:05Z","lastTransitionTime":"2025-12-10T00:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.511099 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.527056 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.551268 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.564533 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.564621 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwpg9\" (UniqueName: \"kubernetes.io/projected/ec324800-e820-40c0-8b51-b020075f09eb-kube-api-access-dwpg9\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:05 crc kubenswrapper[4884]: E1210 
00:31:05.564888 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 10 00:31:05 crc kubenswrapper[4884]: E1210 00:31:05.565014 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs podName:ec324800-e820-40c0-8b51-b020075f09eb nodeName:}" failed. No retries permitted until 2025-12-10 00:31:06.064981703 +0000 UTC m=+39.142938860 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs") pod "network-metrics-daemon-ndwnl" (UID: "ec324800-e820-40c0-8b51-b020075f09eb") : object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.567263 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPa
th\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.585118 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.587603 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwpg9\" (UniqueName: \"kubernetes.io/projected/ec324800-e820-40c0-8b51-b020075f09eb-kube-api-access-dwpg9\") pod \"network-metrics-daemon-ndwnl\" (UID: 
\"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.590541 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.590788 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.590925 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.591055 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.591188 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:05Z","lastTransitionTime":"2025-12-10T00:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.608026 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\
\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8d
aee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.625101 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.646344 4884 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.661169 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:05Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.695652 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.695736 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.695752 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.695775 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.695791 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:05Z","lastTransitionTime":"2025-12-10T00:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.799369 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.799416 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.799450 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.799470 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.799488 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:05Z","lastTransitionTime":"2025-12-10T00:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.904166 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.904253 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.904278 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.904314 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:05 crc kubenswrapper[4884]: I1210 00:31:05.904338 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:05Z","lastTransitionTime":"2025-12-10T00:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.009055 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.009112 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.009130 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.009157 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.009177 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:06Z","lastTransitionTime":"2025-12-10T00:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.072234 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl"
Dec 10 00:31:06 crc kubenswrapper[4884]: E1210 00:31:06.072388 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 10 00:31:06 crc kubenswrapper[4884]: E1210 00:31:06.072537 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs podName:ec324800-e820-40c0-8b51-b020075f09eb nodeName:}" failed. No retries permitted until 2025-12-10 00:31:07.072503479 +0000 UTC m=+40.150460826 (durationBeforeRetry 1s).
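
The nestedpendingoperations entries above, together with the one logged at 00:31:07 below, show the kubelet's per-volume retry backoff for the failing metrics-certs mount: the first failed MountVolume attempt is retried after 1s (durationBeforeRetry 1s), and the next failure doubles the delay to 2s. A minimal Python sketch of that doubling pattern; the doubling factor is visible in the log, while the cap is an assumption for illustration:

# Doubling retry delay of the kind visible in the durationBeforeRetry
# values (1s at 00:31:06, 2s at 00:31:07); the 2-minute cap is an
# assumption for the sketch, not a value read from this log.
def backoff_delays(initial=1.0, factor=2.0, cap=120.0):
    delay = initial
    while True:
        yield min(delay, cap)
        delay = min(delay * factor, cap)

delays = backoff_delays()
print([next(delays) for _ in range(5)])  # [1.0, 2.0, 4.0, 8.0, 16.0]
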
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs") pod "network-metrics-daemon-ndwnl" (UID: "ec324800-e820-40c0-8b51-b020075f09eb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.112632 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.112682 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.112695 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.112713 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.112752 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:06Z","lastTransitionTime":"2025-12-10T00:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.216829 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.216927 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.216959 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.216992 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.217018 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:06Z","lastTransitionTime":"2025-12-10T00:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.286611 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:06 crc kubenswrapper[4884]: E1210 00:31:06.286864 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.320766 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.320882 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.320910 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.320941 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.321155 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:06Z","lastTransitionTime":"2025-12-10T00:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.425178 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.425250 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.425267 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.425302 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.425320 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:06Z","lastTransitionTime":"2025-12-10T00:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.530080 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.530132 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.530143 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.530162 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.530202 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:06Z","lastTransitionTime":"2025-12-10T00:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.633360 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.633691 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.633713 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.633740 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.633757 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:06Z","lastTransitionTime":"2025-12-10T00:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.737463 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.738352 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.738549 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.738704 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.739036 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:06Z","lastTransitionTime":"2025-12-10T00:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.842591 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.842670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.842692 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.842722 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.842741 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:06Z","lastTransitionTime":"2025-12-10T00:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.946571 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.946641 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.946660 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.946690 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:06 crc kubenswrapper[4884]: I1210 00:31:06.946709 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:06Z","lastTransitionTime":"2025-12-10T00:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.050045 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.050125 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.050135 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.050159 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.050178 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:07Z","lastTransitionTime":"2025-12-10T00:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.085358 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:07 crc kubenswrapper[4884]: E1210 00:31:07.085620 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:31:07 crc kubenswrapper[4884]: E1210 00:31:07.085736 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs podName:ec324800-e820-40c0-8b51-b020075f09eb nodeName:}" failed. No retries permitted until 2025-12-10 00:31:09.085708989 +0000 UTC m=+42.163666136 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs") pod "network-metrics-daemon-ndwnl" (UID: "ec324800-e820-40c0-8b51-b020075f09eb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.153682 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.153745 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.153765 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.153793 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.153812 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:07Z","lastTransitionTime":"2025-12-10T00:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.256933 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.257013 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.257031 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.257062 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.257086 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:07Z","lastTransitionTime":"2025-12-10T00:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.286513 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.286639 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:07 crc kubenswrapper[4884]: E1210 00:31:07.286708 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:07 crc kubenswrapper[4884]: E1210 00:31:07.286915 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.286668 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:07 crc kubenswrapper[4884]: E1210 00:31:07.287285 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
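
Every NodeNotReady heartbeat in this stretch carries the same root cause: the kubelet finds no CNI configuration file in /etc/kubernetes/cni/net.d/, so the node's Ready condition stays False until the network provider (the OVN-Kubernetes pods seen elsewhere in this log) writes one. A small sketch of an equivalent presence check; the directory comes from the log message, while the extension filter is an assumption:

# Mirrors the readiness gate behind the repeated messages above:
# NetworkReady stays false until a CNI config file appears here.
import os

CNI_CONF_DIR = "/etc/kubernetes/cni/net.d/"  # path taken from the log

def cni_config_present(conf_dir: str = CNI_CONF_DIR) -> bool:
    try:
        # .conf/.conflist filter is an assumption for the sketch.
        return any(name.endswith((".conf", ".conflist"))
                   for name in os.listdir(conf_dir))
    except FileNotFoundError:
        return False

print(cni_config_present())
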
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.312903 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.333711 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"l
astState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.355114 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.361634 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.361701 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.361722 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.361751 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.361771 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:07Z","lastTransitionTime":"2025-12-10T00:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.382210 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.409082 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.431105 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.453379 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.465926 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.465963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.465977 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.466000 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.466017 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:07Z","lastTransitionTime":"2025-12-10T00:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.474459 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.490920 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.514591 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.542116 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.560546 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.568810 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.568855 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.568869 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.568892 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.568909 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:07Z","lastTransitionTime":"2025-12-10T00:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.579670 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"c
ontainerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.598016 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readO
nly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.618000 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.638250 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.672180 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.672246 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.672260 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.672287 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.672313 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:07Z","lastTransitionTime":"2025-12-10T00:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.776887 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.777703 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.777764 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.777807 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.777835 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:07Z","lastTransitionTime":"2025-12-10T00:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.883331 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.883397 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.883409 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.883456 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.883478 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:07Z","lastTransitionTime":"2025-12-10T00:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.986555 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.986642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.986667 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.986702 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:07 crc kubenswrapper[4884]: I1210 00:31:07.986727 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:07Z","lastTransitionTime":"2025-12-10T00:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.091239 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.091320 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.091344 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.091377 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.091402 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:08Z","lastTransitionTime":"2025-12-10T00:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.195023 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.195362 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.195563 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.195705 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.195854 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:08Z","lastTransitionTime":"2025-12-10T00:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.286388 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:08 crc kubenswrapper[4884]: E1210 00:31:08.286952 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.299250 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.299640 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.299681 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.299707 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.299724 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:08Z","lastTransitionTime":"2025-12-10T00:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.403719 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.403772 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.403790 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.403813 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.403829 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:08Z","lastTransitionTime":"2025-12-10T00:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.506858 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.506928 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.506940 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.506963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.506975 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:08Z","lastTransitionTime":"2025-12-10T00:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.609582 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.609623 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.609633 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.609650 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.609665 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:08Z","lastTransitionTime":"2025-12-10T00:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.713248 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.713379 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.713402 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.713457 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.713478 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:08Z","lastTransitionTime":"2025-12-10T00:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.818026 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.818085 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.818096 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.818118 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.818135 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:08Z","lastTransitionTime":"2025-12-10T00:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.922197 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.922272 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.922282 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.922303 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:08 crc kubenswrapper[4884]: I1210 00:31:08.922315 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:08Z","lastTransitionTime":"2025-12-10T00:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.025891 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.025981 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.026007 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.026045 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.026071 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.110841 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:09 crc kubenswrapper[4884]: E1210 00:31:09.111090 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:31:09 crc kubenswrapper[4884]: E1210 00:31:09.111195 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs podName:ec324800-e820-40c0-8b51-b020075f09eb nodeName:}" failed. No retries permitted until 2025-12-10 00:31:13.111163841 +0000 UTC m=+46.189120988 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs") pod "network-metrics-daemon-ndwnl" (UID: "ec324800-e820-40c0-8b51-b020075f09eb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.129974 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.130033 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.130042 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.130093 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.130109 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.232991 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.233078 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.233097 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.233126 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.233147 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.287044 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.287102 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.287177 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:09 crc kubenswrapper[4884]: E1210 00:31:09.287521 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:09 crc kubenswrapper[4884]: E1210 00:31:09.287621 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:09 crc kubenswrapper[4884]: E1210 00:31:09.287704 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.309005 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.309084 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.309102 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.309145 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.309167 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:09 crc kubenswrapper[4884]: E1210 00:31:09.331699 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:09Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.338494 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.338553 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.338606 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.338649 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.338673 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:09 crc kubenswrapper[4884]: E1210 00:31:09.360229 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:09Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.366480 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.366554 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.366572 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.366607 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.366633 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:09 crc kubenswrapper[4884]: E1210 00:31:09.389422 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:09Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.395934 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.396015 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.396045 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.396078 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.396100 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:09 crc kubenswrapper[4884]: E1210 00:31:09.428171 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:09Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.435699 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.435774 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.435798 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.435830 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.435853 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:09 crc kubenswrapper[4884]: E1210 00:31:09.457201 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:09Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:09 crc kubenswrapper[4884]: E1210 00:31:09.457502 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.460515 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.460591 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.460616 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.460644 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.460663 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.564194 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.564275 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.564294 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.564386 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.564407 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.601100 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.602549 4884 scope.go:117] "RemoveContainer" containerID="fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328" Dec 10 00:31:09 crc kubenswrapper[4884]: E1210 00:31:09.602817 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.668879 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.668950 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.668976 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.669016 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.669040 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.772898 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.772964 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.772991 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.773021 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.773048 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.876801 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.876868 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.876887 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.876913 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.876933 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.980410 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.980516 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.980535 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.980563 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:09 crc kubenswrapper[4884]: I1210 00:31:09.980582 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:09Z","lastTransitionTime":"2025-12-10T00:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.083979 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.084063 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.084089 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.084123 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.084148 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:10Z","lastTransitionTime":"2025-12-10T00:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.187370 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.187414 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.187424 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.187458 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.187468 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:10Z","lastTransitionTime":"2025-12-10T00:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.286042 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:10 crc kubenswrapper[4884]: E1210 00:31:10.286158 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.291214 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.291254 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.291390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.291406 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.291419 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:10Z","lastTransitionTime":"2025-12-10T00:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.393875 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.393951 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.393974 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.394003 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.394026 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:10Z","lastTransitionTime":"2025-12-10T00:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.497673 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.497738 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.497752 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.497773 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.497787 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:10Z","lastTransitionTime":"2025-12-10T00:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.600726 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.600876 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.600898 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.600924 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.600942 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:10Z","lastTransitionTime":"2025-12-10T00:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.704394 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.704531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.704556 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.704586 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.704610 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:10Z","lastTransitionTime":"2025-12-10T00:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.808401 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.808486 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.808497 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.808519 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.808532 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:10Z","lastTransitionTime":"2025-12-10T00:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.911692 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.911770 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.911792 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.911821 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:10 crc kubenswrapper[4884]: I1210 00:31:10.911839 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:10Z","lastTransitionTime":"2025-12-10T00:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.014899 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.014952 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.014969 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.014997 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.015019 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:11Z","lastTransitionTime":"2025-12-10T00:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.118011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.118071 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.118093 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.118120 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.118139 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:11Z","lastTransitionTime":"2025-12-10T00:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.221785 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.221851 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.221871 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.221899 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.221931 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:11Z","lastTransitionTime":"2025-12-10T00:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.286545 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.286633 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.286678 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:11 crc kubenswrapper[4884]: E1210 00:31:11.286737 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:11 crc kubenswrapper[4884]: E1210 00:31:11.286828 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:11 crc kubenswrapper[4884]: E1210 00:31:11.286973 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.324803 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.324860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.324874 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.324898 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.324916 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:11Z","lastTransitionTime":"2025-12-10T00:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.428545 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.428631 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.428656 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.428691 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.428716 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:11Z","lastTransitionTime":"2025-12-10T00:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.533466 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.533541 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.533560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.533592 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.533613 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:11Z","lastTransitionTime":"2025-12-10T00:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.637373 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.637486 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.637508 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.637537 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.637557 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:11Z","lastTransitionTime":"2025-12-10T00:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.741832 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.741910 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.741928 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.741970 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.741990 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:11Z","lastTransitionTime":"2025-12-10T00:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.846869 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.846994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.847018 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.847051 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.847083 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:11Z","lastTransitionTime":"2025-12-10T00:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.951860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.951980 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.952002 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.952072 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:11 crc kubenswrapper[4884]: I1210 00:31:11.953936 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:11Z","lastTransitionTime":"2025-12-10T00:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.058956 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.059041 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.059062 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.059096 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.059121 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:12Z","lastTransitionTime":"2025-12-10T00:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.163388 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.163499 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.163518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.163546 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.163566 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:12Z","lastTransitionTime":"2025-12-10T00:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.267792 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.267884 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.267909 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.267939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.267961 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:12Z","lastTransitionTime":"2025-12-10T00:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.286542 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:12 crc kubenswrapper[4884]: E1210 00:31:12.286713 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.370732 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.370806 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.370829 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.370858 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.370883 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:12Z","lastTransitionTime":"2025-12-10T00:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.473855 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.473920 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.473940 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.473963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.473980 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:12Z","lastTransitionTime":"2025-12-10T00:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.577669 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.577713 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.577729 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.577753 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.577769 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:12Z","lastTransitionTime":"2025-12-10T00:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.681654 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.681736 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.681761 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.681793 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.681849 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:12Z","lastTransitionTime":"2025-12-10T00:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.784182 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.784264 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.784290 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.784318 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.784336 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:12Z","lastTransitionTime":"2025-12-10T00:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.886935 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.887003 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.887023 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.887050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.887068 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:12Z","lastTransitionTime":"2025-12-10T00:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.989762 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.989836 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.989856 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.989885 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:12 crc kubenswrapper[4884]: I1210 00:31:12.989904 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:12Z","lastTransitionTime":"2025-12-10T00:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.094367 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.094489 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.094513 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.094540 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.094560 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:13Z","lastTransitionTime":"2025-12-10T00:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.161415 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:13 crc kubenswrapper[4884]: E1210 00:31:13.161691 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:31:13 crc kubenswrapper[4884]: E1210 00:31:13.161829 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs podName:ec324800-e820-40c0-8b51-b020075f09eb nodeName:}" failed. No retries permitted until 2025-12-10 00:31:21.161796936 +0000 UTC m=+54.239754093 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs") pod "network-metrics-daemon-ndwnl" (UID: "ec324800-e820-40c0-8b51-b020075f09eb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.197412 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.197499 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.197518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.197543 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.197563 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:13Z","lastTransitionTime":"2025-12-10T00:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.286886 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.287045 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:13 crc kubenswrapper[4884]: E1210 00:31:13.287140 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.287183 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:13 crc kubenswrapper[4884]: E1210 00:31:13.287416 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:13 crc kubenswrapper[4884]: E1210 00:31:13.287624 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.300725 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.300778 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.300795 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.300817 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.300834 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:13Z","lastTransitionTime":"2025-12-10T00:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.403698 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.403770 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.403779 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.403804 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.403817 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:13Z","lastTransitionTime":"2025-12-10T00:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.506736 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.506828 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.506843 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.506931 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.506952 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:13Z","lastTransitionTime":"2025-12-10T00:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.634639 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.634715 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.634733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.634766 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.634784 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:13Z","lastTransitionTime":"2025-12-10T00:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.739288 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.739363 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.739389 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.739424 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.739517 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:13Z","lastTransitionTime":"2025-12-10T00:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.843351 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.843422 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.843492 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.843529 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.843551 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:13Z","lastTransitionTime":"2025-12-10T00:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.947115 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.947199 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.947221 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.947252 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:13 crc kubenswrapper[4884]: I1210 00:31:13.947272 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:13Z","lastTransitionTime":"2025-12-10T00:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.051160 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.051222 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.051238 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.051265 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.051282 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:14Z","lastTransitionTime":"2025-12-10T00:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.154791 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.155025 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.155050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.155081 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.155103 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:14Z","lastTransitionTime":"2025-12-10T00:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.258699 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.258771 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.258793 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.258819 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.258836 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:14Z","lastTransitionTime":"2025-12-10T00:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.286663 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:14 crc kubenswrapper[4884]: E1210 00:31:14.286926 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.362165 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.362245 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.362262 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.362288 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.362307 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:14Z","lastTransitionTime":"2025-12-10T00:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.467398 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.467498 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.467516 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.467550 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.467566 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:14Z","lastTransitionTime":"2025-12-10T00:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.570954 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.571418 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.571691 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.572140 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.572564 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:14Z","lastTransitionTime":"2025-12-10T00:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.675612 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.675657 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.675670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.675695 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.675713 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:14Z","lastTransitionTime":"2025-12-10T00:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.779274 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.779390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.779420 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.779510 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.779543 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:14Z","lastTransitionTime":"2025-12-10T00:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.883254 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.883855 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.883988 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.884127 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.884350 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:14Z","lastTransitionTime":"2025-12-10T00:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.988674 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.988743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.988769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.988798 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:14 crc kubenswrapper[4884]: I1210 00:31:14.988820 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:14Z","lastTransitionTime":"2025-12-10T00:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.092305 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.093547 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.094306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.094829 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.095154 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:15Z","lastTransitionTime":"2025-12-10T00:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.199017 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.199412 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.199645 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.199823 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.199966 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:15Z","lastTransitionTime":"2025-12-10T00:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.286945 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:15 crc kubenswrapper[4884]: E1210 00:31:15.287171 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.287568 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:15 crc kubenswrapper[4884]: E1210 00:31:15.287740 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.287984 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:15 crc kubenswrapper[4884]: E1210 00:31:15.288144 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.302795 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.302863 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.302886 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.302913 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.302935 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:15Z","lastTransitionTime":"2025-12-10T00:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.406278 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.406658 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.407023 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.407411 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.407704 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:15Z","lastTransitionTime":"2025-12-10T00:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.510888 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.510960 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.510985 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.511016 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.511038 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:15Z","lastTransitionTime":"2025-12-10T00:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.613892 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.613964 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.613985 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.614011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.614035 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:15Z","lastTransitionTime":"2025-12-10T00:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.718098 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.718205 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.718226 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.718259 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.718282 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:15Z","lastTransitionTime":"2025-12-10T00:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.821674 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.821753 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.821771 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.821801 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.821820 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:15Z","lastTransitionTime":"2025-12-10T00:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.925346 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.925808 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.926398 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.926928 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:15 crc kubenswrapper[4884]: I1210 00:31:15.927429 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:15Z","lastTransitionTime":"2025-12-10T00:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.031518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.031849 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.032029 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.032169 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.032787 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:16Z","lastTransitionTime":"2025-12-10T00:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.162004 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.162047 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.162057 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.162099 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.162115 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:16Z","lastTransitionTime":"2025-12-10T00:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.266417 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.266565 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.266590 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.266627 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.266652 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:16Z","lastTransitionTime":"2025-12-10T00:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.287017 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:16 crc kubenswrapper[4884]: E1210 00:31:16.287270 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.371215 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.371291 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.371311 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.371342 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.371368 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:16Z","lastTransitionTime":"2025-12-10T00:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.475911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.475995 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.476019 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.476145 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.476206 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:16Z","lastTransitionTime":"2025-12-10T00:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.579995 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.580052 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.580065 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.580087 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.580103 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:16Z","lastTransitionTime":"2025-12-10T00:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.682965 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.683028 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.683044 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.683065 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.683080 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:16Z","lastTransitionTime":"2025-12-10T00:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.787154 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.787235 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.787261 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.787298 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.787354 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:16Z","lastTransitionTime":"2025-12-10T00:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.890538 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.890626 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.890650 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.890684 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.890706 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:16Z","lastTransitionTime":"2025-12-10T00:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.910083 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.910242 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.910307 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 10 00:31:16 crc kubenswrapper[4884]: E1210 00:31:16.910495 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Dec 10 00:31:16 crc kubenswrapper[4884]: E1210 00:31:16.910572 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:31:48.910549268 +0000 UTC m=+81.988506425 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
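
The nestedpendingoperations record above (and the two that follow) push the next mount attempt out by 32 seconds (durationBeforeRetry 32s). Kubelet volume operations back off exponentially on repeated failure; a sketch of that pattern, where the 500ms seed and the 2m ceiling are assumptions chosen so that six doublings land on the 32s visible here (0.5s x 2^6 = 32s):

package main

import (
	"fmt"
	"time"
)

// durationBeforeRetry doubles the wait after every failed attempt, the
// pattern behind the "durationBeforeRetry 32s" records. Seed and ceiling
// are illustrative values, not read out of the kubelet source.
func durationBeforeRetry(failures int, seed, ceiling time.Duration) time.Duration {
	d := seed
	for i := 0; i < failures; i++ {
		d *= 2
		if d >= ceiling {
			return ceiling
		}
	}
	return d
}

func main() {
	for n := 0; n <= 8; n++ {
		fmt.Printf("failures=%d next retry in %v\n", n, durationBeforeRetry(n, 500*time.Millisecond, 2*time.Minute))
	}
}
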
Dec 10 00:31:16 crc kubenswrapper[4884]: E1210 00:31:16.910786 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 10 00:31:16 crc kubenswrapper[4884]: E1210 00:31:16.910914 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:31:48.910881178 +0000 UTC m=+81.988838335 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 10 00:31:16 crc kubenswrapper[4884]: E1210 00:31:16.910962 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:31:48.910938239 +0000 UTC m=+81.988895396 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.994315 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.994393 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.994411 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.994470 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:16 crc kubenswrapper[4884]: I1210 00:31:16.994491 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:16Z","lastTransitionTime":"2025-12-10T00:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
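
The UnmountVolume.TearDown failure above is different in kind from the ConfigMap/Secret errors: the CSI driver itself is missing from the kubelet's in-memory registry, which starts empty after a restart and refills only as each driver re-registers over the plugin socket. An illustrative registry lookup that fails with the same message (the map-based type is hypothetical, not kubelet's actual structure):

package main

import "fmt"

// csiDriverRegistry stands in for the kubelet's list of registered CSI
// plugins; TearDown cannot build a CSI client for a driver that has not
// (re-)registered yet.
type csiDriverRegistry map[string]string // driver name -> endpoint

func (r csiDriverRegistry) clientFor(name string) (string, error) {
	ep, ok := r[name]
	if !ok {
		return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return ep, nil
}

func main() {
	reg := csiDriverRegistry{} // empty right after a kubelet restart
	if _, err := reg.clientFor("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println("UnmountVolume.TearDown failed:", err)
	}
}
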
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.012004 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.012070 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 10 00:31:17 crc kubenswrapper[4884]: E1210 00:31:17.012274 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 10 00:31:17 crc kubenswrapper[4884]: E1210 00:31:17.012304 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 10 00:31:17 crc kubenswrapper[4884]: E1210 00:31:17.012324 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 10 00:31:17 crc kubenswrapper[4884]: E1210 00:31:17.012326 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Dec 10 00:31:17 crc kubenswrapper[4884]: E1210 00:31:17.012367 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Dec 10 00:31:17 crc kubenswrapper[4884]: E1210 00:31:17.012425 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 10 00:31:17 crc kubenswrapper[4884]: E1210 00:31:17.012475 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 00:31:49.012379411 +0000 UTC m=+82.090336558 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 10 00:31:17 crc kubenswrapper[4884]: E1210 00:31:17.012560 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-10 00:31:49.012525706 +0000 UTC m=+82.090482863 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.098792 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.098898 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.098924 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.098963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.098983 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:17Z","lastTransitionTime":"2025-12-10T00:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.202931 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.203013 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.203036 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.203074 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.203101 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:17Z","lastTransitionTime":"2025-12-10T00:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
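
Both kube-api-access-* volumes above are projected volumes that bundle the service-account token with the kube-root-ca.crt and, on OpenShift, openshift-service-ca.crt ConfigMaps; the mounts fail because neither ConfigMap is in the kubelet's object store yet. A sketch of the shape of such a volume using the k8s.io/api/core/v1 types (field values such as the 3607s token lifetime are illustrative; requires the k8s.io/api module):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// kubeAPIAccessVolume shows what a generated kube-api-access-* projected
// volume bundles: a bound SA token plus the two CA ConfigMaps named in
// the mount errors above.
func kubeAPIAccessVolume(name string) corev1.Volume {
	expiry := int64(3607)
	return corev1.Volume{
		Name: name,
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						Path: "token", ExpirationSeconds: &expiry,
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "openshift-service-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "service-ca.crt", Path: "service-ca.crt"}},
					}},
				},
			},
		},
	}
}

func main() {
	fmt.Printf("%+v\n", kubeAPIAccessVolume("kube-api-access-cqllr"))
}
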
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.286197 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.286317 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 10 00:31:17 crc kubenswrapper[4884]: E1210 00:31:17.286492 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.286567 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl"
Dec 10 00:31:17 crc kubenswrapper[4884]: E1210 00:31:17.286803 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 10 00:31:17 crc kubenswrapper[4884]: E1210 00:31:17.287017 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.306187 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.306268 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.306295 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.306331 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.306349 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:17Z","lastTransitionTime":"2025-12-10T00:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
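
The pod_workers records above show the knock-on effect: pods whose sandboxes are gone ("No sandbox for pod can be found") cannot be recreated while the runtime network is down, so their sync is skipped outright. A sketch of that gate, assuming host-network pods are exempt (names here are illustrative, not kubelet's actual identifiers):

package main

import "fmt"

type runtimeStatus struct{ networkReady bool }

// canCreateSandbox refuses a new sandbox for a non-host-network pod while
// the runtime still reports NetworkReady=false.
func canCreateSandbox(hostNetwork bool, rs runtimeStatus) error {
	if !hostNetwork && !rs.networkReady {
		return fmt.Errorf("network is not ready: container runtime network not ready: NetworkReady=false")
	}
	return nil
}

func main() {
	if err := canCreateSandbox(false, runtimeStatus{networkReady: false}); err != nil {
		fmt.Println("Error syncing pod, skipping:", err)
	}
}
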
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.311818 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z"
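
From this point on, every status patch fails the same way: the network-node-identity webhook at 127.0.0.1:9743 serves a certificate whose NotAfter (2025-08-24T17:21:41Z) is months behind the node clock (2025-12-10), so the TLS handshake is rejected. The same NotBefore/NotAfter window check against a PEM file, using only the Go standard library (the certificate path is illustrative):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Path is an assumption based on the webhook container's
	// /etc/webhook-cert/ mount seen later in this log.
	raw, err := os.ReadFile("/etc/webhook-cert/tls.crt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	now := time.Now()
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("certificate has expired or is not yet valid: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
	}
}
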
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.341963 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.360577 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.379524 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.396426 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.410327 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.410381 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.410400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.410490 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.410516 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:17Z","lastTransitionTime":"2025-12-10T00:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.418291 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.441159 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.458028 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.475864 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.503151 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.514226 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.514759 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.514815 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.514852 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.514876 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:17Z","lastTransitionTime":"2025-12-10T00:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.522221 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"nam
e\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.536063 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.553543 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.583109 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.599164 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:17Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.617943 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.618008 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.618027 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.618055 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.618077 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:17Z","lastTransitionTime":"2025-12-10T00:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.721490 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.721959 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.721984 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.722015 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.722038 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:17Z","lastTransitionTime":"2025-12-10T00:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.824583 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.824630 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.824647 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.824670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.824688 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:17Z","lastTransitionTime":"2025-12-10T00:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.928059 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.928121 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.928144 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.928174 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:17 crc kubenswrapper[4884]: I1210 00:31:17.928194 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:17Z","lastTransitionTime":"2025-12-10T00:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.032021 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.032114 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.032139 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.032179 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.032207 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:18Z","lastTransitionTime":"2025-12-10T00:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.135250 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.135319 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.135338 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.135367 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.135388 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:18Z","lastTransitionTime":"2025-12-10T00:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.238648 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.238719 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.238738 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.238763 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.238780 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:18Z","lastTransitionTime":"2025-12-10T00:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.286329 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:18 crc kubenswrapper[4884]: E1210 00:31:18.286528 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.342167 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.342265 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.342291 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.342327 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.342350 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:18Z","lastTransitionTime":"2025-12-10T00:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.445544 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.445646 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.445666 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.445733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.445756 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:18Z","lastTransitionTime":"2025-12-10T00:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.549604 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.549676 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.549696 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.549724 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.549744 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:18Z","lastTransitionTime":"2025-12-10T00:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.653240 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.653296 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.653312 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.653331 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.653341 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:18Z","lastTransitionTime":"2025-12-10T00:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.757142 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.757202 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.757219 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.757248 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.757268 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:18Z","lastTransitionTime":"2025-12-10T00:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.861730 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.861811 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.861832 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.861860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.861886 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:18Z","lastTransitionTime":"2025-12-10T00:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.965892 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.965989 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.966011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.966046 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:18 crc kubenswrapper[4884]: I1210 00:31:18.966069 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:18Z","lastTransitionTime":"2025-12-10T00:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.070558 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.070624 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.070647 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.070683 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.070711 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.174952 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.175130 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.175162 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.175204 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.175230 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.278934 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.279754 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.279794 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.279830 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.279850 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.287276 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.287329 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.287327 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:19 crc kubenswrapper[4884]: E1210 00:31:19.287491 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:19 crc kubenswrapper[4884]: E1210 00:31:19.287647 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:19 crc kubenswrapper[4884]: E1210 00:31:19.287776 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.383423 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.383501 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.383519 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.383547 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.383565 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.487103 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.487150 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.487165 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.487187 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.487203 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.556777 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.556825 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.556851 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.556878 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.556931 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: E1210 00:31:19.579559 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:19Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.586117 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.586195 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.586215 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.586296 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.586317 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: E1210 00:31:19.609067 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:19Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.614041 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.614090 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.614109 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.614132 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.614154 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: E1210 00:31:19.633978 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:19Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.639113 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.639191 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.639214 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.639245 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.639271 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: E1210 00:31:19.661977 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:19Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.668316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.668383 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.668403 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.668469 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.668490 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: E1210 00:31:19.690422 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:19Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:19 crc kubenswrapper[4884]: E1210 00:31:19.690671 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.692785 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.692839 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.692857 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.692885 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.692905 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.795872 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.795940 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.795964 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.795994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.796018 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.898853 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.898928 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.898951 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.898984 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:19 crc kubenswrapper[4884]: I1210 00:31:19.899008 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:19Z","lastTransitionTime":"2025-12-10T00:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.002670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.002735 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.002763 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.002799 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.002823 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:20Z","lastTransitionTime":"2025-12-10T00:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.106222 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.106598 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.106626 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.106658 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.106675 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:20Z","lastTransitionTime":"2025-12-10T00:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.209670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.209892 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.210038 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.210194 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.210323 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:20Z","lastTransitionTime":"2025-12-10T00:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.286867 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:20 crc kubenswrapper[4884]: E1210 00:31:20.287129 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.313780 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.313861 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.313881 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.313910 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.313932 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:20Z","lastTransitionTime":"2025-12-10T00:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.417310 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.417394 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.417417 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.417550 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.417576 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:20Z","lastTransitionTime":"2025-12-10T00:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.521139 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.521531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.521690 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.521837 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.521970 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:20Z","lastTransitionTime":"2025-12-10T00:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.626050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.626246 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.626394 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.626584 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.626723 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:20Z","lastTransitionTime":"2025-12-10T00:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.730456 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.730520 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.730541 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.730565 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.730585 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:20Z","lastTransitionTime":"2025-12-10T00:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.834336 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.834421 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.834475 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.834510 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.834532 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:20Z","lastTransitionTime":"2025-12-10T00:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.939242 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.939312 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.939331 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.939359 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:20 crc kubenswrapper[4884]: I1210 00:31:20.939378 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:20Z","lastTransitionTime":"2025-12-10T00:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.043233 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.043295 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.043313 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.043342 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.043358 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:21Z","lastTransitionTime":"2025-12-10T00:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.146424 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.146768 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.146790 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.146821 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.146841 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:21Z","lastTransitionTime":"2025-12-10T00:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.174621 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:21 crc kubenswrapper[4884]: E1210 00:31:21.174918 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:31:21 crc kubenswrapper[4884]: E1210 00:31:21.175057 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs podName:ec324800-e820-40c0-8b51-b020075f09eb nodeName:}" failed. No retries permitted until 2025-12-10 00:31:37.175023953 +0000 UTC m=+70.252981240 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs") pod "network-metrics-daemon-ndwnl" (UID: "ec324800-e820-40c0-8b51-b020075f09eb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.250078 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.250139 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.250154 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.250178 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.250192 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:21Z","lastTransitionTime":"2025-12-10T00:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.286393 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.286523 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.286571 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:21 crc kubenswrapper[4884]: E1210 00:31:21.286868 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:21 crc kubenswrapper[4884]: E1210 00:31:21.287140 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:21 crc kubenswrapper[4884]: E1210 00:31:21.287286 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.352885 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.352927 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.352943 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.352963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.352980 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:21Z","lastTransitionTime":"2025-12-10T00:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.456264 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.456337 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.456361 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.456396 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.456426 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:21Z","lastTransitionTime":"2025-12-10T00:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.560101 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.560149 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.560161 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.560178 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.560191 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:21Z","lastTransitionTime":"2025-12-10T00:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.663254 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.663335 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.663357 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.663392 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.663415 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:21Z","lastTransitionTime":"2025-12-10T00:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.766864 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.766922 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.766939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.766968 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.766985 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:21Z","lastTransitionTime":"2025-12-10T00:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.869605 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.869669 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.869686 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.869710 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.869730 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:21Z","lastTransitionTime":"2025-12-10T00:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.972994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.973045 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.973062 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.973086 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:21 crc kubenswrapper[4884]: I1210 00:31:21.973123 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:21Z","lastTransitionTime":"2025-12-10T00:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.076258 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.076321 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.076344 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.076374 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.076400 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:22Z","lastTransitionTime":"2025-12-10T00:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.179951 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.180011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.180028 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.180051 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.180069 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:22Z","lastTransitionTime":"2025-12-10T00:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.284292 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.284362 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.284372 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.284386 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.284395 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:22Z","lastTransitionTime":"2025-12-10T00:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.286529 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:22 crc kubenswrapper[4884]: E1210 00:31:22.286616 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.355020 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.365318 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.374622 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.387527 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.387860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.388342 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.388588 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.388731 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:22Z","lastTransitionTime":"2025-12-10T00:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.390291 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.405899 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.420972 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.436453 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.465876 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.479545 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.492400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.492451 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.492462 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.492479 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.492490 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:22Z","lastTransitionTime":"2025-12-10T00:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.498990 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.511790 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.526527 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.550230 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.569281 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.583598 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.595078 4884 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.595150 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.595169 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.595196 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.595215 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:22Z","lastTransitionTime":"2025-12-10T00:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.602124 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.621752 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.640359 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:22Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.698624 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.698884 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.698922 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.698951 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.698972 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:22Z","lastTransitionTime":"2025-12-10T00:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.801855 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.801913 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.801923 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.801938 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.801948 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:22Z","lastTransitionTime":"2025-12-10T00:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.904934 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.905000 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.905018 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.905043 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:22 crc kubenswrapper[4884]: I1210 00:31:22.905061 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:22Z","lastTransitionTime":"2025-12-10T00:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.008281 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.008356 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.008378 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.008407 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.008427 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:23Z","lastTransitionTime":"2025-12-10T00:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.111545 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.111602 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.111624 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.111652 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.111670 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:23Z","lastTransitionTime":"2025-12-10T00:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.214902 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.214980 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.215005 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.215040 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.215067 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:23Z","lastTransitionTime":"2025-12-10T00:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.286942 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:23 crc kubenswrapper[4884]: E1210 00:31:23.287127 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.287257 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.287282 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:23 crc kubenswrapper[4884]: E1210 00:31:23.287519 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:23 crc kubenswrapper[4884]: E1210 00:31:23.288284 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.288975 4884 scope.go:117] "RemoveContainer" containerID="fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.317976 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.318026 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.318042 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.318065 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.318082 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:23Z","lastTransitionTime":"2025-12-10T00:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.420841 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.420907 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.420925 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.420954 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.420972 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:23Z","lastTransitionTime":"2025-12-10T00:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.524047 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.524085 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.524102 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.524125 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.524143 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:23Z","lastTransitionTime":"2025-12-10T00:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.627526 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.627574 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.627591 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.627614 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.627632 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:23Z","lastTransitionTime":"2025-12-10T00:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.706717 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/1.log" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.710306 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287"} Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.712619 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.747682 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.747740 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.747764 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.747798 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.747824 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:23Z","lastTransitionTime":"2025-12-10T00:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.760849 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.791396 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.815184 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.833540 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.844573 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.850769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.850830 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.850845 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.850874 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.850891 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:23Z","lastTransitionTime":"2025-12-10T00:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.861614 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 
00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.880203 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.897588 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.915585 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.943717 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.953644 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.953680 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.953689 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.953705 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.953715 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:23Z","lastTransitionTime":"2025-12-10T00:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.958582 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.980666 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:23 crc kubenswrapper[4884]: I1210 00:31:23.997095 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:23Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.008895 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.017009 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.029219 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.042903 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.055995 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.056044 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.056056 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.056075 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.056087 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:24Z","lastTransitionTime":"2025-12-10T00:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.158273 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.158302 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.158310 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.158324 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.158333 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:24Z","lastTransitionTime":"2025-12-10T00:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.261289 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.261337 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.261352 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.261367 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.261378 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:24Z","lastTransitionTime":"2025-12-10T00:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.287108 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 10 00:31:24 crc kubenswrapper[4884]: E1210 00:31:24.287291 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.365381 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.365524 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.365551 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.365583 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.365605 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:24Z","lastTransitionTime":"2025-12-10T00:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.469892 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.470578 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.470606 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.470641 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.470667 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:24Z","lastTransitionTime":"2025-12-10T00:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.573623 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.573789 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.573816 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.573840 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.573857 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:24Z","lastTransitionTime":"2025-12-10T00:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.676922 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.677050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.677075 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.677114 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.677134 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:24Z","lastTransitionTime":"2025-12-10T00:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.780635 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.780695 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.780717 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.780935 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.780954 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:24Z","lastTransitionTime":"2025-12-10T00:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.884532 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.884591 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.884611 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.884637 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.884656 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:24Z","lastTransitionTime":"2025-12-10T00:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.988684 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.988746 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.988757 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.988778 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:24 crc kubenswrapper[4884]: I1210 00:31:24.988790 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:24Z","lastTransitionTime":"2025-12-10T00:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.092353 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.092466 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.092636 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.092987 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.093049 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:25Z","lastTransitionTime":"2025-12-10T00:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.195939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.195987 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.196001 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.196021 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.196034 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:25Z","lastTransitionTime":"2025-12-10T00:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.286778 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.286820 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl"
Dec 10 00:31:25 crc kubenswrapper[4884]: E1210 00:31:25.286954 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.287022 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 10 00:31:25 crc kubenswrapper[4884]: E1210 00:31:25.287109 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb"
Dec 10 00:31:25 crc kubenswrapper[4884]: E1210 00:31:25.287515 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.306531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.306683 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.306711 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.306783 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.306809 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:25Z","lastTransitionTime":"2025-12-10T00:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.410953 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.411017 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.411034 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.411128 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.411169 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:25Z","lastTransitionTime":"2025-12-10T00:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.514515 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.514585 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.514605 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.514640 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.514659 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:25Z","lastTransitionTime":"2025-12-10T00:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.617936 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.618011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.618029 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.618056 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.618084 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:25Z","lastTransitionTime":"2025-12-10T00:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.719976 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.720022 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.720039 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.720064 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.720080 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:25Z","lastTransitionTime":"2025-12-10T00:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.720326 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/2.log"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.721216 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/1.log"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.725217 4884 generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287" exitCode=1
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.725257 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287"}
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.725298 4884 scope.go:117] "RemoveContainer" containerID="fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.726539 4884 scope.go:117] "RemoveContainer" containerID="a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287"
Dec 10 00:31:25 crc kubenswrapper[4884]: E1210 00:31:25.727795 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.755666 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.774486 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.794028 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.815176 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.823484 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.823550 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.823562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.823584 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.823597 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:25Z","lastTransitionTime":"2025-12-10T00:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.837110 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.853526 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.874825 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.893064 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.909350 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.926274 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.926327 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.926345 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.926371 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.926460 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:25Z","lastTransitionTime":"2025-12-10T00:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.934585 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4
cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.969970 4884 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32
fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:24Z\\\",\\\"message\\\":\\\"e, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:24.373214 6602 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF1210 00:31:24.372907 6602 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z]\\\\nI1210 00:31:24.373233 6602 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1210 00:31:24.373115 6602 services_controller.go:443] Built service openshift-kube-storage-version-migrator-operator/metrics LB cluster-wide configs for network=default: 
[]services.lbConfi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174
f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:25 crc kubenswrapper[4884]: I1210 00:31:25.988800 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:25Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.011729 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:26Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.030063 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.030117 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.030140 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.030198 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.030216 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:26Z","lastTransitionTime":"2025-12-10T00:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.030264 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:26Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.049509 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:26Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.065149 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:26Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.082826 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:26Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.133549 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.133613 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.133632 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.133659 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.133677 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:26Z","lastTransitionTime":"2025-12-10T00:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.236289 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.236357 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.236374 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.236398 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.236418 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:26Z","lastTransitionTime":"2025-12-10T00:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.286326 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:26 crc kubenswrapper[4884]: E1210 00:31:26.286547 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.339618 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.339688 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.339710 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.339739 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.339758 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:26Z","lastTransitionTime":"2025-12-10T00:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.443810 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.443903 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.443930 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.443965 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.443990 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:26Z","lastTransitionTime":"2025-12-10T00:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.547179 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.547276 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.547298 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.547327 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.547344 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:26Z","lastTransitionTime":"2025-12-10T00:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.650627 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.650682 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.650700 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.650730 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.650748 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:26Z","lastTransitionTime":"2025-12-10T00:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.732371 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/2.log"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.753796 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.753902 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.753920 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.753947 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.753966 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:26Z","lastTransitionTime":"2025-12-10T00:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.857656 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.857729 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.857748 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.857775 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.857797 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:26Z","lastTransitionTime":"2025-12-10T00:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.961056 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.961099 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.961109 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.961142 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:26 crc kubenswrapper[4884]: I1210 00:31:26.961153 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:26Z","lastTransitionTime":"2025-12-10T00:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.063596 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.063684 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.063709 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.063742 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.063767 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:27Z","lastTransitionTime":"2025-12-10T00:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.166929 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.167002 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.167028 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.167059 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.167079 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:27Z","lastTransitionTime":"2025-12-10T00:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.270296 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.270375 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.270395 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.270423 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.270472 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:27Z","lastTransitionTime":"2025-12-10T00:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.286388 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.286391 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.286636 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 10 00:31:27 crc kubenswrapper[4884]: E1210 00:31:27.286841 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 10 00:31:27 crc kubenswrapper[4884]: E1210 00:31:27.286988 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb"
Dec 10 00:31:27 crc kubenswrapper[4884]: E1210 00:31:27.287167 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.307416 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.329997 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.346967 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.370705 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.373864 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.373916 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.373936 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.373965 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.373984 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:27Z","lastTransitionTime":"2025-12-10T00:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.387622 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.404788 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.423270 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.435399 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.451488 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.500672 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.501116 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.501208 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.501293 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.501409 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:27Z","lastTransitionTime":"2025-12-10T00:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.506360 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.518493 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.535832 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z"
Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.569911 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:24Z\\\",\\\"message\\\":\\\"e, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:24.373214 6602 ovn.go:134] Ensuring zone local for Pod 
openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF1210 00:31:24.372907 6602 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z]\\\\nI1210 00:31:24.373233 6602 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1210 00:31:24.373115 6602 services_controller.go:443] Built service openshift-kube-storage-version-migrator-operator/metrics LB cluster-wide configs for network=default: []services.lbConfi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.582521 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.599003 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.604252 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.604283 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.604295 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.604314 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.604326 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:27Z","lastTransitionTime":"2025-12-10T00:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.615649 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63
a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.628509 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"p
odIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:27Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.706351 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.706401 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.706412 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.706429 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.706475 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:27Z","lastTransitionTime":"2025-12-10T00:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.810042 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.810118 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.810139 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.810167 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.810186 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:27Z","lastTransitionTime":"2025-12-10T00:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.913110 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.913162 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.913176 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.913200 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:27 crc kubenswrapper[4884]: I1210 00:31:27.913217 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:27Z","lastTransitionTime":"2025-12-10T00:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.015720 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.015771 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.015785 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.015806 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.015820 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:28Z","lastTransitionTime":"2025-12-10T00:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.118721 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.118787 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.118811 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.118845 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.118871 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:28Z","lastTransitionTime":"2025-12-10T00:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.222516 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.222573 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.222590 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.222614 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.222632 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:28Z","lastTransitionTime":"2025-12-10T00:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.286754 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:28 crc kubenswrapper[4884]: E1210 00:31:28.287012 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.325393 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.325494 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.325519 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.325551 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.325577 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:28Z","lastTransitionTime":"2025-12-10T00:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.430162 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.430249 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.430270 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.430302 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.430323 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:28Z","lastTransitionTime":"2025-12-10T00:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.533942 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.534042 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.534072 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.534105 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.534130 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:28Z","lastTransitionTime":"2025-12-10T00:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.637455 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.637531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.637556 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.637587 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.637611 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:28Z","lastTransitionTime":"2025-12-10T00:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.740783 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.740850 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.740871 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.740904 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.740922 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:28Z","lastTransitionTime":"2025-12-10T00:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.844699 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.844775 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.844804 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.844841 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.844864 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:28Z","lastTransitionTime":"2025-12-10T00:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.947768 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.948363 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.948568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.948744 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:28 crc kubenswrapper[4884]: I1210 00:31:28.948942 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:28Z","lastTransitionTime":"2025-12-10T00:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.053021 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.053515 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.053759 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.053964 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.054323 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.163400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.163549 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.163570 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.163599 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.163862 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.267457 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.267526 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.267545 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.267571 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.267588 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.286918 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:29 crc kubenswrapper[4884]: E1210 00:31:29.287157 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.287614 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:29 crc kubenswrapper[4884]: E1210 00:31:29.287775 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.288340 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:29 crc kubenswrapper[4884]: E1210 00:31:29.288662 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.370929 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.371001 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.371019 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.371047 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.371066 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.474769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.474841 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.474862 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.474894 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.474914 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.578134 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.578504 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.578663 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.578817 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.578962 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.682486 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.682559 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.682581 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.682610 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.682629 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.758317 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.758386 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.758403 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.758512 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.758534 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: E1210 00:31:29.781175 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:29Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.787762 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.788039 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.788201 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.788341 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.788540 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: E1210 00:31:29.807818 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:29Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.813155 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.813216 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.813239 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.813267 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.813286 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: E1210 00:31:29.828486 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:29Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.834014 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.834290 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.834519 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.834733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.834925 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: E1210 00:31:29.855826 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:29Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.861539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.861599 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.861623 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.861654 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.861672 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: E1210 00:31:29.882642 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:29Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:29Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:29 crc kubenswrapper[4884]: E1210 00:31:29.882861 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.885198 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.885247 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.885264 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.885290 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.885308 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.988549 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.988622 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.988642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.988672 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:29 crc kubenswrapper[4884]: I1210 00:31:29.988695 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:29Z","lastTransitionTime":"2025-12-10T00:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:30 crc kubenswrapper[4884]: I1210 00:31:30.092197 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:30 crc kubenswrapper[4884]: I1210 00:31:30.092232 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:30 crc kubenswrapper[4884]: I1210 00:31:30.092239 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:30 crc kubenswrapper[4884]: I1210 00:31:30.092253 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:30 crc kubenswrapper[4884]: I1210 00:31:30.092262 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:30Z","lastTransitionTime":"2025-12-10T00:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 10 00:31:30 crc kubenswrapper[4884]: I1210 00:31:30.286612 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 10 00:31:30 crc kubenswrapper[4884]: E1210 00:31:30.286779 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
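Every NotReady heartbeat and every skipped pod sync above traces back to the same condition: no file in /etc/kubernetes/cni/net.d/. A minimal sketch of that directory scan, assuming (as libcni does) that only *.conf, *.conflist and *.json entries count as network configurations; the real plugin also parses and validates the files, which this sketch skips:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the NetworkReady=false message above.
	const confDir = "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", confDir, err)
		return
	}
	found := 0
	for _, e := range entries {
		// libcni considers these extensions when discovering configs.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("CNI config:", e.Name())
			found++
		}
	}
	if found == 0 {
		fmt.Println("no CNI configuration files: kubelet keeps NetworkReady=false")
	}
}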
Dec 10 00:31:31 crc kubenswrapper[4884]: I1210 00:31:31.018992 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:31 crc kubenswrapper[4884]: I1210 00:31:31.019051 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:31 crc kubenswrapper[4884]: I1210 00:31:31.019067 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:31 crc kubenswrapper[4884]: I1210 00:31:31.019129 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:31 crc kubenswrapper[4884]: I1210 00:31:31.019149 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:31Z","lastTransitionTime":"2025-12-10T00:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:31 crc kubenswrapper[4884]: I1210 00:31:31.287229 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl"
Dec 10 00:31:31 crc kubenswrapper[4884]: I1210 00:31:31.287259 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 10 00:31:31 crc kubenswrapper[4884]: E1210 00:31:31.287394 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb"
Dec 10 00:31:31 crc kubenswrapper[4884]: I1210 00:31:31.287485 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 10 00:31:31 crc kubenswrapper[4884]: E1210 00:31:31.287735 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 10 00:31:31 crc kubenswrapper[4884]: E1210 00:31:31.287935 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
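Only the four pods named above are skipped; static host-network pods keep running while NetworkReady is false. A minimal sketch of that gate, under the assumption that the kubelet's actual check (which consults the runtime's NetworkReady condition inside its pod sync path) reduces to "host-network pods may proceed"; the second pod name below is hypothetical and only the first appears in this log:

package main

import (
	"errors"
	"fmt"
)

type pod struct {
	name        string
	hostNetwork bool
}

// syncAllowed reports whether a pod may be synced while the CNI plugin
// has not yet written a config: host-network pods do not need the
// cluster network, so only they proceed.
func syncAllowed(p pod, networkReady bool) error {
	if networkReady || p.hostNetwork {
		return nil
	}
	return errors.New("network is not ready: container runtime network not ready: NetworkReady=false")
}

func main() {
	pods := []pod{
		{name: "openshift-multus/network-metrics-daemon-ndwnl", hostNetwork: false},
		{name: "example-ns/example-host-network-pod", hostNetwork: true}, // hypothetical
	}
	for _, p := range pods {
		if err := syncAllowed(p, false); err != nil {
			fmt.Printf("Error syncing pod, skipping: %v pod=%q\n", err, p.name)
		} else {
			fmt.Printf("pod %q proceeds (hostNetwork)\n", p.name)
		}
	}
}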
Dec 10 00:31:32 crc kubenswrapper[4884]: I1210 00:31:32.050877 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:32 crc kubenswrapper[4884]: I1210 00:31:32.050951 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:32 crc kubenswrapper[4884]: I1210 00:31:32.050969 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:32 crc kubenswrapper[4884]: I1210 00:31:32.050994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:32 crc kubenswrapper[4884]: I1210 00:31:32.051536 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:32Z","lastTransitionTime":"2025-12-10T00:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
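The condition object printed by setters.go above has a stable shape; only the two timestamps advance on each heartbeat. A minimal sketch that reproduces that JSON, with field names copied from the log output rather than from the upstream Kubernetes API types:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Field names copied from the JSON keys in the log lines above.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	now := time.Now().UTC().Format(time.RFC3339)
	c := nodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message:            "container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady",
	}
	b, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}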
Dec 10 00:31:32 crc kubenswrapper[4884]: I1210 00:31:32.286651 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 10 00:31:32 crc kubenswrapper[4884]: E1210 00:31:32.286884 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.086936 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.087015 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.087034 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.087062 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.087080 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:33Z","lastTransitionTime":"2025-12-10T00:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.286767 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.286886 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl"
Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.286925 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 10 00:31:33 crc kubenswrapper[4884]: E1210 00:31:33.287031 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 10 00:31:33 crc kubenswrapper[4884]: E1210 00:31:33.287059 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb"
Dec 10 00:31:33 crc kubenswrapper[4884]: E1210 00:31:33.287156 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.293682 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.293711 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.293724 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.293739 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.293752 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:33Z","lastTransitionTime":"2025-12-10T00:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.395829 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.395876 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.395888 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.395905 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:33 crc kubenswrapper[4884]: I1210 00:31:33.395918 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:33Z","lastTransitionTime":"2025-12-10T00:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.012562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.012636 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.012649 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.012676 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.012695 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:34Z","lastTransitionTime":"2025-12-10T00:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.115464 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.115546 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.115566 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.115605 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.115625 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:34Z","lastTransitionTime":"2025-12-10T00:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.218771 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.218840 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.218858 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.218883 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.218901 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:34Z","lastTransitionTime":"2025-12-10T00:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.286943 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:34 crc kubenswrapper[4884]: E1210 00:31:34.287217 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.322492 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.322567 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.322581 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.322603 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.322620 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:34Z","lastTransitionTime":"2025-12-10T00:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.425975 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.426045 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.426063 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.426088 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.426106 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:34Z","lastTransitionTime":"2025-12-10T00:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.528752 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.528785 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.528794 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.528808 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.528817 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:34Z","lastTransitionTime":"2025-12-10T00:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.631545 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.631604 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.631622 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.631646 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.631667 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:34Z","lastTransitionTime":"2025-12-10T00:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.735216 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.735297 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.735318 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.735348 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.735368 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:34Z","lastTransitionTime":"2025-12-10T00:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.838544 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.838594 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.838604 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.838621 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.838634 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:34Z","lastTransitionTime":"2025-12-10T00:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.941317 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.941366 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.941379 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.941397 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:34 crc kubenswrapper[4884]: I1210 00:31:34.941407 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:34Z","lastTransitionTime":"2025-12-10T00:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.044693 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.044760 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.044775 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.044795 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.044808 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:35Z","lastTransitionTime":"2025-12-10T00:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.148018 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.148099 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.148117 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.148145 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.148163 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:35Z","lastTransitionTime":"2025-12-10T00:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.251185 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.251242 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.251265 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.251297 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.251317 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:35Z","lastTransitionTime":"2025-12-10T00:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.286202 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.286202 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:35 crc kubenswrapper[4884]: E1210 00:31:35.286377 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.286481 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:35 crc kubenswrapper[4884]: E1210 00:31:35.286634 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:35 crc kubenswrapper[4884]: E1210 00:31:35.286724 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.354414 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.354518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.354537 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.354568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.354588 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:35Z","lastTransitionTime":"2025-12-10T00:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.456912 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.456957 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.456969 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.456983 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.456993 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:35Z","lastTransitionTime":"2025-12-10T00:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.559326 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.559372 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.559397 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.559416 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.559452 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:35Z","lastTransitionTime":"2025-12-10T00:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.662310 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.662354 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.662384 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.662398 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.662407 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:35Z","lastTransitionTime":"2025-12-10T00:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.764759 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.764806 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.764822 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.764847 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.764863 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:35Z","lastTransitionTime":"2025-12-10T00:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.868143 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.868184 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.868201 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.868226 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.868245 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:35Z","lastTransitionTime":"2025-12-10T00:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.971829 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.971963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.971984 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.972066 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:35 crc kubenswrapper[4884]: I1210 00:31:35.972127 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:35Z","lastTransitionTime":"2025-12-10T00:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.076174 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.076232 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.076254 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.076279 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.076296 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:36Z","lastTransitionTime":"2025-12-10T00:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.178803 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.178858 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.178873 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.178894 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.178906 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:36Z","lastTransitionTime":"2025-12-10T00:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.281529 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.281807 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.281817 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.281835 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.281847 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:36Z","lastTransitionTime":"2025-12-10T00:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.286797 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:36 crc kubenswrapper[4884]: E1210 00:31:36.286967 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.384744 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.384809 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.384823 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.384845 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.384859 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:36Z","lastTransitionTime":"2025-12-10T00:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.487614 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.487677 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.487697 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.487725 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.487742 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:36Z","lastTransitionTime":"2025-12-10T00:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.590334 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.590373 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.590384 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.590399 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.590410 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:36Z","lastTransitionTime":"2025-12-10T00:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.693093 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.693150 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.693169 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.693195 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.693217 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:36Z","lastTransitionTime":"2025-12-10T00:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.776884 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rcj68_0269081f-f135-4e66-91fd-a16277a00355/kube-multus/0.log" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.776960 4884 generic.go:334] "Generic (PLEG): container finished" podID="0269081f-f135-4e66-91fd-a16277a00355" containerID="bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2" exitCode=1 Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.777001 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rcj68" event={"ID":"0269081f-f135-4e66-91fd-a16277a00355","Type":"ContainerDied","Data":"bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2"} Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.777588 4884 scope.go:117] "RemoveContainer" containerID="bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.791729 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-12-10T00:31:36Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.795990 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.796063 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.796088 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.796120 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.796143 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:36Z","lastTransitionTime":"2025-12-10T00:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.813990 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"
cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8dae
e3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:36Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.844208 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e958202110112
1710443a5319f677b0b61287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:24Z\\\",\\\"message\\\":\\\"e, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:24.373214 6602 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF1210 00:31:24.372907 6602 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z]\\\\nI1210 00:31:24.373233 6602 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc 
after 0 failed attempt(s)\\\\nI1210 00:31:24.373115 6602 services_controller.go:443] Built service openshift-kube-storage-version-migrator-operator/metrics LB cluster-wide configs for network=default: []services.lbConfi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"ini
tContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:36Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.857785 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:36Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.883852 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:36Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.901378 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.901472 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.901493 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.901514 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.901525 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:36Z","lastTransitionTime":"2025-12-10T00:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.910129 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:36Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.926664 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:36Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.939461 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:36Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.956880 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:36Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.973129 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:36Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:36 crc kubenswrapper[4884]: I1210 00:31:36.993129 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"2025-12-10T00:30:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c\\\\n2025-12-10T00:30:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c to /host/opt/cni/bin/\\\\n2025-12-10T00:30:51Z [verbose] multus-daemon started\\\\n2025-12-10T00:30:51Z [verbose] Readiness Indicator file check\\\\n2025-12-10T00:31:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:36Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.006529 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.006588 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.006614 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.006646 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.006672 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:37Z","lastTransitionTime":"2025-12-10T00:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.011342 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.030186 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.049246 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.063471 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.077644 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.098150 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.109054 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.109103 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.109122 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.109145 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.109164 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:37Z","lastTransitionTime":"2025-12-10T00:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.188165 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:37 crc kubenswrapper[4884]: E1210 00:31:37.188360 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:31:37 crc kubenswrapper[4884]: E1210 00:31:37.188509 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs podName:ec324800-e820-40c0-8b51-b020075f09eb nodeName:}" failed. No retries permitted until 2025-12-10 00:32:09.188475712 +0000 UTC m=+102.266432869 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs") pod "network-metrics-daemon-ndwnl" (UID: "ec324800-e820-40c0-8b51-b020075f09eb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.212100 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.212150 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.212168 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.212194 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.212211 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:37Z","lastTransitionTime":"2025-12-10T00:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.286116 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.286238 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.286315 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:37 crc kubenswrapper[4884]: E1210 00:31:37.286469 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:37 crc kubenswrapper[4884]: E1210 00:31:37.286577 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:37 crc kubenswrapper[4884]: E1210 00:31:37.286699 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.308103 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.314410 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.314503 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.314524 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.314551 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.314568 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:37Z","lastTransitionTime":"2025-12-10T00:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.323984 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"2025-12-10T00:30:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c\\\\n2025-12-10T00:30:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c to /host/opt/cni/bin/\\\\n2025-12-10T00:30:51Z [verbose] multus-daemon started\\\\n2025-12-10T00:30:51Z [verbose] Readiness Indicator file check\\\\n2025-12-10T00:31:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.338424 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.352225 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.366056 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.379739 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.395168 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.414908 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.416022 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.416057 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.416066 4884 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.416084 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.416097 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:37Z","lastTransitionTime":"2025-12-10T00:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.427956 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.445342 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:68
7fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"m
ountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.471639 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:24Z\\\",\\\"message\\\":\\\"e, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:24.373214 6602 ovn.go:134] Ensuring zone local for Pod 
openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF1210 00:31:24.372907 6602 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z]\\\\nI1210 00:31:24.373233 6602 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1210 00:31:24.373115 6602 services_controller.go:443] Built service openshift-kube-storage-version-migrator-operator/metrics LB cluster-wide configs for network=default: []services.lbConfi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.490189 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.512908 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.519211 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.519270 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.519290 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.519319 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.519339 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:37Z","lastTransitionTime":"2025-12-10T00:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.529179 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.546193 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.563257 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.575302 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.622283 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.622801 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.622916 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.622958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.622978 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:37Z","lastTransitionTime":"2025-12-10T00:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.726257 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.726318 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.726339 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.726365 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.726384 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:37Z","lastTransitionTime":"2025-12-10T00:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.783281 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rcj68_0269081f-f135-4e66-91fd-a16277a00355/kube-multus/0.log" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.783395 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rcj68" event={"ID":"0269081f-f135-4e66-91fd-a16277a00355","Type":"ContainerStarted","Data":"90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa"} Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.805326 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.821306 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.831524 4884 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.831571 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.831584 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.831607 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.831622 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:37Z","lastTransitionTime":"2025-12-10T00:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.837299 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.855044 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.872675 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.894282 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"2025-12-10T00:30:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c\\\\n2025-12-10T00:30:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c to /host/opt/cni/bin/\\\\n2025-12-10T00:30:51Z [verbose] multus-daemon started\\\\n2025-12-10T00:30:51Z [verbose] Readiness Indicator file check\\\\n2025-12-10T00:31:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.911765 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.930179 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.934112 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.934155 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.934166 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.934185 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.934198 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:37Z","lastTransitionTime":"2025-12-10T00:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.946018 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.960342 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.977341 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8
b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:37 crc kubenswrapper[4884]: I1210 00:31:37.995150 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:37Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.007760 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:38Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.027518 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:38Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.037486 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.037567 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:38 crc 
kubenswrapper[4884]: I1210 00:31:38.037592 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.037631 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.037658 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:38Z","lastTransitionTime":"2025-12-10T00:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.054588 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e958202110112
1710443a5319f677b0b61287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fe87463018eeba6ca0cb7d327589f32dd741c3d4773285774c68e6d1a9713328\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:02Z\\\",\\\"message\\\":\\\"er.go:443] Built service openshift-ingress-canary/ingress-canary LB cluster-wide configs for network=default: []services.lbConfig{services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8443, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}, services.lbConfig{vips:[]string{\\\\\\\"10.217.5.34\\\\\\\"}, protocol:\\\\\\\"TCP\\\\\\\", inport:8888, clusterEndpoints:services.lbEndpoints{Port:0, V4IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:01.693350 6339 services_controller.go:444] Built service openshift-ingress-canary/ingress-canary LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693365 6339 services_controller.go:445] Built service openshift-ingress-canary/ingress-canary LB template configs for network=default: []services.lbConfig(nil)\\\\nI1210 00:31:01.693294 6339 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-pxpwg\\\\nF1210 00:31:01.692827 6339 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:00Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:24Z\\\",\\\"message\\\":\\\"e, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:24.373214 6602 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF1210 00:31:24.372907 6602 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z]\\\\nI1210 00:31:24.373233 6602 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc 
after 0 failed attempt(s)\\\\nI1210 00:31:24.373115 6602 services_controller.go:443] Built service openshift-kube-storage-version-migrator-operator/metrics LB cluster-wide configs for network=default: []services.lbConfi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"ini
tContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:38Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.068552 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:38Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.086071 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:38Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.140674 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.140733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.140743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.140766 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.140777 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:38Z","lastTransitionTime":"2025-12-10T00:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.243474 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.243545 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.243570 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.243608 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.243637 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:38Z","lastTransitionTime":"2025-12-10T00:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.286768 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:38 crc kubenswrapper[4884]: E1210 00:31:38.286943 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.346578 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.346633 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.346654 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.346702 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.346721 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:38Z","lastTransitionTime":"2025-12-10T00:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.449808 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.449865 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.449887 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.449917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.449939 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:38Z","lastTransitionTime":"2025-12-10T00:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.553134 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.553191 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.553209 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.553238 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.553256 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:38Z","lastTransitionTime":"2025-12-10T00:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.656102 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.656175 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.656196 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.656222 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.656240 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:38Z","lastTransitionTime":"2025-12-10T00:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.758878 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.758913 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.758926 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.758945 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.758960 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:38Z","lastTransitionTime":"2025-12-10T00:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.860922 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.860987 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.861006 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.861028 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.861043 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:38Z","lastTransitionTime":"2025-12-10T00:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.963099 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.963136 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.963147 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.963179 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:38 crc kubenswrapper[4884]: I1210 00:31:38.963192 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:38Z","lastTransitionTime":"2025-12-10T00:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.068587 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.068653 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.068668 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.068690 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.068708 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:39Z","lastTransitionTime":"2025-12-10T00:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.172225 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.172278 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.172289 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.172312 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.172325 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:39Z","lastTransitionTime":"2025-12-10T00:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.275062 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.275108 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.275119 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.275138 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.275150 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:39Z","lastTransitionTime":"2025-12-10T00:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.286701 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.286787 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.286805 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:39 crc kubenswrapper[4884]: E1210 00:31:39.287056 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.287350 4884 scope.go:117] "RemoveContainer" containerID="a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287" Dec 10 00:31:39 crc kubenswrapper[4884]: E1210 00:31:39.287462 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:39 crc kubenswrapper[4884]: E1210 00:31:39.287518 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:39 crc kubenswrapper[4884]: E1210 00:31:39.287541 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.309346 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d1
7ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.328197 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.347422 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.371397 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.379138 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.379191 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.379211 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.379237 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.379258 4884 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:39Z","lastTransitionTime":"2025-12-10T00:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.391002 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\
\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.412999 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc
478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.433511 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.447916 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.470550 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.481961 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.482030 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:39 crc 
kubenswrapper[4884]: I1210 00:31:39.482044 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.482067 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.482080 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:39Z","lastTransitionTime":"2025-12-10T00:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.500245 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e958202110112
1710443a5319f677b0b61287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:24Z\\\",\\\"message\\\":\\\"e, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:24.373214 6602 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF1210 00:31:24.372907 6602 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z]\\\\nI1210 00:31:24.373233 6602 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1210 00:31:24.373115 6602 services_controller.go:443] Built service openshift-kube-storage-version-migrator-operator/metrics LB cluster-wide configs for network=default: []services.lbConfi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.516321 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.534703 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.554889 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.573266 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.584895 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.584935 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.584945 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.584964 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.584974 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:39Z","lastTransitionTime":"2025-12-10T00:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.591687 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"
podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.611990 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.632256 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"2025-12-10T00:30:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c\\\\n2025-12-10T00:30:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c to /host/opt/cni/bin/\\\\n2025-12-10T00:30:51Z [verbose] multus-daemon started\\\\n2025-12-10T00:30:51Z [verbose] Readiness Indicator file check\\\\n2025-12-10T00:31:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:39Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.687840 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.687911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.687930 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.687959 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.687978 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:39Z","lastTransitionTime":"2025-12-10T00:31:39Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.790321 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.790405 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.790423 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.790490 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.790516 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:39Z","lastTransitionTime":"2025-12-10T00:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.892478 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.892538 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.892549 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.892573 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.892585 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:39Z","lastTransitionTime":"2025-12-10T00:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.995574 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.995631 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.995642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.995663 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:39 crc kubenswrapper[4884]: I1210 00:31:39.995676 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:39Z","lastTransitionTime":"2025-12-10T00:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.098225 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.098275 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.098289 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.098311 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.098325 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.133614 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.133675 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.133694 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.133721 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.133739 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:40 crc kubenswrapper[4884]: E1210 00:31:40.148697 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:40Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.152917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.152966 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.152980 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.152997 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.153011 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:40 crc kubenswrapper[4884]: E1210 00:31:40.163879 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:40Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.173052 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.173110 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
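Every failed status patch recorded above shares one root cause: the network-node-identity webhook at https://127.0.0.1:9743 serves a certificate whose notAfter (2025-08-24T17:21:41Z) predates the node's clock, so the kubelet's TLS client rejects the handshake. The following is a minimal Python sketch, standard library only, that reproduces the same date-validity failure from the client side; the host and port are taken from the records above, everything else is illustrative rather than kubelet code:

    import socket, ssl

    HOST, PORT = "127.0.0.1", 9743  # webhook endpoint quoted in the log records above

    # The default context applies the same certificate validity-window check the
    # kubelet's client performs; hostname checking is disabled because the logged
    # failure is date-based, not name-based.
    ctx = ssl.create_default_context()
    ctx.check_hostname = False

    try:
        with socket.create_connection((HOST, PORT), timeout=10) as sock:
            with ctx.wrap_socket(sock) as tls:
                print("handshake ok; notAfter:", tls.getpeercert().get("notAfter"))
    except ssl.SSLCertVerificationError as exc:
        # An expired serving certificate yields "certificate has expired", the same
        # x509 failure quoted above. (If the signing CA is not in the local trust
        # store, verify_message names that failure instead.)
        print("verification failed:", exc.verify_message)

Run against the live endpoint, this should keep failing exactly as the kubelet does until the serving certificate is rotated.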
event="NodeHasNoDiskPressure" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.173130 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.173158 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.173178 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:40 crc kubenswrapper[4884]: E1210 00:31:40.188497 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:40Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.194285 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.194333 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
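The payloads elided in these retries are ordinary JSON quoted into the err field of each record. A short sketch for pulling such a patch back out of a journal stream and summarizing it; it assumes one record per physical line and the backslash-escaping style shown in this dump, and PATCH_RE / decode_patch are illustrative names, not kubelet API:

    import json, re, sys

    # The patch sits between \"{ ... }\" inside err="failed to patch status ...".
    PATCH_RE = re.compile(r'failed to patch status \\"(\{.*\})\\" for (node|pod)')

    def decode_patch(raw: str) -> dict:
        # two passes collapse the \\\" -> \" -> " escaping seen in these records
        for _ in range(2):
            raw = raw.encode().decode("unicode_escape")
        return json.loads(raw)

    # usage: journalctl -u kubelet | python3 summarize_patches.py
    for line in sys.stdin:
        m = PATCH_RE.search(line)
        if not m:
            continue
        status = decode_patch(m.group(1)).get("status", {})
        reasons = {c["type"]: c.get("reason") for c in status.get("conditions", [])}
        images = status.get("images", [])
        print(f'{m.group(2)} patch: Ready={reasons.get("Ready")}',
              f'images={len(images)}',
              f'imageBytes={sum(i["sizeBytes"] for i in images)}')

Fed the unabridged journal, it prints one summary line per retry (for the node patches above: Ready=KubeletNotReady with 50 cached images), which makes it easy to see how much of the log volume is a single repeated payload.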
event="NodeHasNoDiskPressure" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.194343 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.194363 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.194374 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:40 crc kubenswrapper[4884]: E1210 00:31:40.213707 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:40Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.218195 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.218225 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.218236 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.218252 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.218261 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:40 crc kubenswrapper[4884]: E1210 00:31:40.235879 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:40Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:40 crc kubenswrapper[4884]: E1210 00:31:40.236105 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.237407 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.237461 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.237473 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.237489 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.237500 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.286688 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:40 crc kubenswrapper[4884]: E1210 00:31:40.286817 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.339519 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.339602 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.339621 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.339643 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.339662 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.442166 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.442234 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.442253 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.442278 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.442298 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.544781 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.544897 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.544917 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.544940 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.544958 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.647937 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.648018 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.648042 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.648077 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.648103 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.751562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.751698 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.751771 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.751810 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.751835 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.854537 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.854592 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.854610 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.854633 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.854651 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.957555 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.957593 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.957604 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.957623 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:40 crc kubenswrapper[4884]: I1210 00:31:40.957633 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:40Z","lastTransitionTime":"2025-12-10T00:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.060997 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.061060 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.061076 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.061101 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.061117 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:41Z","lastTransitionTime":"2025-12-10T00:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.164575 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.164647 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.164670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.164700 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.164718 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:41Z","lastTransitionTime":"2025-12-10T00:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.268250 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.268313 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.268330 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.268358 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.268375 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:41Z","lastTransitionTime":"2025-12-10T00:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.286694 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.286721 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:41 crc kubenswrapper[4884]: E1210 00:31:41.286921 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.286991 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:41 crc kubenswrapper[4884]: E1210 00:31:41.287077 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:41 crc kubenswrapper[4884]: E1210 00:31:41.287228 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.371799 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.371861 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.371879 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.371903 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.371922 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:41Z","lastTransitionTime":"2025-12-10T00:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.474888 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.474953 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.474975 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.474999 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.475017 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:41Z","lastTransitionTime":"2025-12-10T00:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.578356 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.578474 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.578499 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.578526 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.578550 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:41Z","lastTransitionTime":"2025-12-10T00:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.681718 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.681802 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.681815 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.681839 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.681860 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:41Z","lastTransitionTime":"2025-12-10T00:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.785364 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.785424 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.785466 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.785496 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.785522 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:41Z","lastTransitionTime":"2025-12-10T00:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.889590 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.889662 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.889685 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.889715 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.889737 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:41Z","lastTransitionTime":"2025-12-10T00:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.993236 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.993391 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.993420 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.993481 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:41 crc kubenswrapper[4884]: I1210 00:31:41.993506 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:41Z","lastTransitionTime":"2025-12-10T00:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.099822 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.100185 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.100243 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.100277 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.100308 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:42Z","lastTransitionTime":"2025-12-10T00:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.204061 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.204533 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.204582 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.204606 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.204624 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:42Z","lastTransitionTime":"2025-12-10T00:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.286305 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:42 crc kubenswrapper[4884]: E1210 00:31:42.286636 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.307870 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.307962 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.307999 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.308056 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.308077 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:42Z","lastTransitionTime":"2025-12-10T00:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.412700 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.412799 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.412824 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.412859 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.412883 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:42Z","lastTransitionTime":"2025-12-10T00:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.516115 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.516175 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.516193 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.516216 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.516232 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:42Z","lastTransitionTime":"2025-12-10T00:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.618968 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.619027 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.619051 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.619079 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.619101 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:42Z","lastTransitionTime":"2025-12-10T00:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.722224 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.722324 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.722350 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.722382 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.722406 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:42Z","lastTransitionTime":"2025-12-10T00:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.825196 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.825256 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.825270 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.825290 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.825306 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:42Z","lastTransitionTime":"2025-12-10T00:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.928063 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.928375 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.928539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.928749 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:42 crc kubenswrapper[4884]: I1210 00:31:42.928893 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:42Z","lastTransitionTime":"2025-12-10T00:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.032805 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.032869 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.032889 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.032915 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.032932 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:43Z","lastTransitionTime":"2025-12-10T00:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.135754 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.136560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.136736 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.136871 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.136997 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:43Z","lastTransitionTime":"2025-12-10T00:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.239723 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.240142 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.240302 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.240488 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.240625 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:43Z","lastTransitionTime":"2025-12-10T00:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.286926 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.286990 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:43 crc kubenswrapper[4884]: E1210 00:31:43.287184 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.287261 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:43 crc kubenswrapper[4884]: E1210 00:31:43.287381 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:43 crc kubenswrapper[4884]: E1210 00:31:43.287545 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.343866 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.343925 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.343945 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.343969 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.343990 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:43Z","lastTransitionTime":"2025-12-10T00:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.446830 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.446905 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.446919 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.446938 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.446949 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:43Z","lastTransitionTime":"2025-12-10T00:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.550929 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.550984 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.551000 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.551023 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.551036 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:43Z","lastTransitionTime":"2025-12-10T00:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.654417 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.654505 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.654518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.654542 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.654562 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:43Z","lastTransitionTime":"2025-12-10T00:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.758279 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.758360 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.758387 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.758420 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.758513 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:43Z","lastTransitionTime":"2025-12-10T00:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.862470 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.862562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.862580 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.862606 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.862625 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:43Z","lastTransitionTime":"2025-12-10T00:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.966889 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.967109 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.967136 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.967168 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:43 crc kubenswrapper[4884]: I1210 00:31:43.967191 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:43Z","lastTransitionTime":"2025-12-10T00:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.070423 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.070539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.070558 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.070591 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.070613 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:44Z","lastTransitionTime":"2025-12-10T00:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.174200 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.174269 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.174292 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.174316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.174334 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:44Z","lastTransitionTime":"2025-12-10T00:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.277265 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.277337 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.277352 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.277381 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.277400 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:44Z","lastTransitionTime":"2025-12-10T00:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.286675 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:44 crc kubenswrapper[4884]: E1210 00:31:44.286876 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.380682 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.380752 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.380764 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.380794 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.380809 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:44Z","lastTransitionTime":"2025-12-10T00:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.486860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.486960 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.486985 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.487024 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.487061 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:44Z","lastTransitionTime":"2025-12-10T00:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.590670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.590755 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.590772 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.590800 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.590818 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:44Z","lastTransitionTime":"2025-12-10T00:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.694743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.694834 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.694864 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.694900 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.694921 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:44Z","lastTransitionTime":"2025-12-10T00:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.798810 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.798880 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.798898 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.798924 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.798942 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:44Z","lastTransitionTime":"2025-12-10T00:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.903051 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.903120 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.903137 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.903159 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:44 crc kubenswrapper[4884]: I1210 00:31:44.903177 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:44Z","lastTransitionTime":"2025-12-10T00:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.006882 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.006970 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.006990 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.007015 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.007034 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:45Z","lastTransitionTime":"2025-12-10T00:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.110940 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.110995 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.111007 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.111028 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.111040 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:45Z","lastTransitionTime":"2025-12-10T00:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.213602 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.213652 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.213669 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.213694 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.213712 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:45Z","lastTransitionTime":"2025-12-10T00:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.286928 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.286952 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.287125 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:45 crc kubenswrapper[4884]: E1210 00:31:45.287337 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:45 crc kubenswrapper[4884]: E1210 00:31:45.287591 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:45 crc kubenswrapper[4884]: E1210 00:31:45.287688 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.316621 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.316714 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.316736 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.316769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.316789 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:45Z","lastTransitionTime":"2025-12-10T00:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.419876 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.419939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.419965 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.419995 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.420012 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:45Z","lastTransitionTime":"2025-12-10T00:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.523202 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.523262 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.523280 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.523308 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.523326 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:45Z","lastTransitionTime":"2025-12-10T00:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.627351 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.627404 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.627421 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.627480 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.627499 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:45Z","lastTransitionTime":"2025-12-10T00:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.731472 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.731566 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.731592 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.731627 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.731655 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:45Z","lastTransitionTime":"2025-12-10T00:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.835422 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.835585 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.835606 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.835633 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.835696 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:45Z","lastTransitionTime":"2025-12-10T00:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.938596 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.938656 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.938675 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.938697 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:45 crc kubenswrapper[4884]: I1210 00:31:45.938716 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:45Z","lastTransitionTime":"2025-12-10T00:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.041965 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.042009 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.042021 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.042048 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.042057 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:46Z","lastTransitionTime":"2025-12-10T00:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.145216 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.145270 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.145280 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.145299 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.145314 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:46Z","lastTransitionTime":"2025-12-10T00:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.248129 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.248191 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.248208 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.248234 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.248252 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:46Z","lastTransitionTime":"2025-12-10T00:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.286151 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:46 crc kubenswrapper[4884]: E1210 00:31:46.286366 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.352048 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.352118 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.352141 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.352174 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.352199 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:46Z","lastTransitionTime":"2025-12-10T00:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.455402 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.455520 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.455536 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.455556 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.455569 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:46Z","lastTransitionTime":"2025-12-10T00:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.558496 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.558570 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.558592 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.558617 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.558637 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:46Z","lastTransitionTime":"2025-12-10T00:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.662750 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.662808 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.662826 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.662854 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.662874 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:46Z","lastTransitionTime":"2025-12-10T00:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.765907 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.765974 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.765992 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.766015 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.766042 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:46Z","lastTransitionTime":"2025-12-10T00:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.869545 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.869609 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.869627 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.869655 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.869674 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:46Z","lastTransitionTime":"2025-12-10T00:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.980207 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.980275 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.980296 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.980323 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:46 crc kubenswrapper[4884]: I1210 00:31:46.980349 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:46Z","lastTransitionTime":"2025-12-10T00:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.091656 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.091721 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.091739 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.091771 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.091796 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:47Z","lastTransitionTime":"2025-12-10T00:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.194330 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.194424 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.194454 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.194476 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.194500 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:47Z","lastTransitionTime":"2025-12-10T00:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.286733 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.286872 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:47 crc kubenswrapper[4884]: E1210 00:31:47.286972 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.287069 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:47 crc kubenswrapper[4884]: E1210 00:31:47.287219 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:47 crc kubenswrapper[4884]: E1210 00:31:47.287347 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.297016 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.297088 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.297109 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.297139 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.297160 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:47Z","lastTransitionTime":"2025-12-10T00:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.309327 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c
5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.329698 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.350423 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.365733 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.388493 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.400465 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.400522 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.400539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.400566 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.400585 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:47Z","lastTransitionTime":"2025-12-10T00:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.406324 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"2025-12-10T00:30:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c\\\\n2025-12-10T00:30:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c to /host/opt/cni/bin/\\\\n2025-12-10T00:30:51Z [verbose] multus-daemon started\\\\n2025-12-10T00:30:51Z [verbose] Readiness Indicator file check\\\\n2025-12-10T00:31:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.425196 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.446090 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.468811 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.487080 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.503581 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.503647 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.503665 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.503689 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.503707 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:47Z","lastTransitionTime":"2025-12-10T00:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.507209 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.523775 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.538488 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.552836 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.574006 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.600902 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:24Z\\\",\\\"message\\\":\\\"e, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:24.373214 6602 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF1210 00:31:24.372907 6602 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z]\\\\nI1210 00:31:24.373233 6602 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1210 00:31:24.373115 6602 services_controller.go:443] Built service openshift-kube-storage-version-migrator-operator/metrics LB cluster-wide configs for network=default: []services.lbConfi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.607384 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.607472 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.607498 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.607535 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.607559 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:47Z","lastTransitionTime":"2025-12-10T00:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
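Every status-patch failure in this stretch has the same root cause: the network-node-identity webhook on https://127.0.0.1:9743 serves a certificate that expired on 2025-08-24T17:21:41Z, months before the node's current clock of 2025-12-10. A minimal sketch of the validity-window check that the TLS handshake is failing on, assuming Python with the third-party cryptography package is available on the node (host and port taken from the log):

```python
# Sketch: inspect the webhook's serving certificate the way the failing
# TLS handshake does. Host/port come from the log; `cryptography` is an
# assumed third-party dependency (>= 42 for the *_utc properties).
import ssl
from datetime import datetime, timezone

from cryptography import x509

HOST, PORT = "127.0.0.1", 9743

# get_server_certificate() skips chain verification when no CA bundle is
# given, so it can still fetch an already-expired certificate.
pem = ssl.get_server_certificate((HOST, PORT))
cert = x509.load_pem_x509_certificate(pem.encode())

now = datetime.now(timezone.utc)
print(f"valid {cert.not_valid_before_utc} .. {cert.not_valid_after_utc}")
if now > cert.not_valid_after_utc:
    # Mirrors the logged error: "current time ... is after 2025-08-24T17:21:41Z"
    print(f"expired: current time {now} is after {cert.not_valid_after_utc}")
```

Until that certificate is rotated, every webhook-gated API write from this node (node and pod status patches alike) will keep failing with the same x509 error.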
Has your network provider started?"} Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.618281 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:47Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.716963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.717045 4884 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.717064 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.717097 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.717115 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:47Z","lastTransitionTime":"2025-12-10T00:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.820734 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.820850 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.820883 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.820923 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.820966 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:47Z","lastTransitionTime":"2025-12-10T00:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.926525 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.926589 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.926606 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.926626 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:47 crc kubenswrapper[4884]: I1210 00:31:47.926638 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:47Z","lastTransitionTime":"2025-12-10T00:31:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.030217 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.030278 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.030297 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.030326 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.030346 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:48Z","lastTransitionTime":"2025-12-10T00:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.133138 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.133462 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.133554 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.133624 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.133692 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:48Z","lastTransitionTime":"2025-12-10T00:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.237474 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.237544 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.237569 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.237595 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.237614 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:48Z","lastTransitionTime":"2025-12-10T00:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
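The ovnkube-controller status above also shows the restart policy at work: restartCount 2 with "back-off 20s restarting failed container". The kubelet's crash-loop backoff doubles after each failed restart; the 10s base and 5m cap used below are assumed upstream kubelet defaults, not values printed in this log. A sketch of the progression:

```python
# Sketch: kubelet-style crash-loop backoff. The 10s base and 5m cap are
# assumed upstream defaults, not shown in this log.
BASE_S, CAP_S = 10, 300

def backoff_after(restart_count: int) -> int:
    """Seconds the kubelet waits before the next restart attempt."""
    return min(BASE_S * 2 ** max(restart_count - 1, 0), CAP_S)

for n in range(1, 8):
    print(n, backoff_after(n), "s")  # n=2 -> 20s, matching "back-off 20s" above
```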
Has your network provider started?"} Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.287255 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:48 crc kubenswrapper[4884]: E1210 00:31:48.287755 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.340279 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.340464 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.340544 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.340614 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.340711 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:48Z","lastTransitionTime":"2025-12-10T00:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.444307 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.444387 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.444414 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.444482 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.444512 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:48Z","lastTransitionTime":"2025-12-10T00:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.547907 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.547969 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.547989 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.548015 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.548034 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:48Z","lastTransitionTime":"2025-12-10T00:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.651683 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.651752 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.651774 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.651810 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.651835 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:48Z","lastTransitionTime":"2025-12-10T00:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.756600 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.756668 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.756692 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.756721 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.756741 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:48Z","lastTransitionTime":"2025-12-10T00:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
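All of the NetworkPluginNotReady repetition reduces to one probe failing: the runtime finds no CNI network config under /etc/kubernetes/cni/net.d/, and none can appear until the crash-looping ovnkube-controller writes it. A rough Python equivalent of that directory scan (the .conf/.conflist/.json extensions are the usual CNI convention, assumed here rather than taken from the runtime's code):

```python
# Sketch: the readiness probe behind "no CNI configuration file in
# /etc/kubernetes/cni/net.d/". Extensions follow CNI convention (assumed).
from pathlib import Path

CNI_DIR = Path("/etc/kubernetes/cni/net.d")
EXTS = {".conf", ".conflist", ".json"}

configs = sorted(p for p in CNI_DIR.iterdir() if p.suffix in EXTS) if CNI_DIR.is_dir() else []
if configs:
    print("NetworkReady=true, configs:", [p.name for p in configs])
else:
    print("NetworkReady=false: no CNI configuration file in", CNI_DIR)
```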
Has your network provider started?"} Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.860911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.860980 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.861000 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.861031 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.861054 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:48Z","lastTransitionTime":"2025-12-10T00:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.934534 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.934729 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:48 crc kubenswrapper[4884]: E1210 00:31:48.934757 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:52.934716739 +0000 UTC m=+146.012673896 (durationBeforeRetry 1m4s). 
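The volume errors that follow carry "(durationBeforeRetry 1m4s)": volume operations also back off exponentially, and 1m4s is consistent with a 500ms initial delay doubled on each failure (0.5s x 2^7 = 64s). The initial delay and the 2m2s cap below are assumed from upstream kubelet defaults, not stated in the log. A sketch:

```python
# Sketch: the doubling delay behind "durationBeforeRetry 1m4s". The 500ms
# base and 2m2s cap are assumed kubelet defaults, not shown in this log.
INITIAL_S, CAP_S = 0.5, 122.0

def duration_before_retry(failures: int) -> float:
    return min(INITIAL_S * 2 ** max(failures - 1, 0), CAP_S)

for f in range(1, 10):
    print(f, duration_before_retry(f), "s")  # 8th failure -> 64.0s = 1m4s
```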
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.934829 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:48 crc kubenswrapper[4884]: E1210 00:31:48.934912 4884 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:31:48 crc kubenswrapper[4884]: E1210 00:31:48.934996 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:32:52.934970916 +0000 UTC m=+146.012928063 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 10 00:31:48 crc kubenswrapper[4884]: E1210 00:31:48.935078 4884 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:31:48 crc kubenswrapper[4884]: E1210 00:31:48.935141 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-10 00:32:52.935127481 +0000 UTC m=+146.013084638 (durationBeforeRetry 1m4s). 
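The TearDown failure above ("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers") means the driver's node plugin has not re-registered with this kubelet since the restart. Node plugins register by placing a socket in the kubelet's plugin registration directory; a sketch that lists what is currently registered (the path below is the conventional default kubelet root, an assumption):

```python
# Sketch: list node plugins registered with the kubelet. The registration
# directory below is the conventional default (assumed); deployments can
# relocate the kubelet root.
from pathlib import Path

REG_DIR = Path("/var/lib/kubelet/plugins_registry")
socks = sorted(p.name for p in REG_DIR.glob("*.sock")) if REG_DIR.is_dir() else []
print("registered plugin sockets:", socks or "none")
# No socket for kubevirt.io.hostpath-provisioner => TearDownAt keeps failing
# as above until the driver pod restarts and re-registers.
```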
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.965608 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.965702 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.965726 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.965763 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:48 crc kubenswrapper[4884]: I1210 00:31:48.965787 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:48Z","lastTransitionTime":"2025-12-10T00:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.036872 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.036945 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:49 crc kubenswrapper[4884]: E1210 00:31:49.037188 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:31:49 crc kubenswrapper[4884]: E1210 00:31:49.037244 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:31:49 crc kubenswrapper[4884]: E1210 00:31:49.037269 4884 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:31:49 crc kubenswrapper[4884]: E1210 00:31:49.037352 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-12-10 00:32:53.037324175 +0000 UTC m=+146.115281332 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:31:49 crc kubenswrapper[4884]: E1210 00:31:49.037350 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 10 00:31:49 crc kubenswrapper[4884]: E1210 00:31:49.037413 4884 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 10 00:31:49 crc kubenswrapper[4884]: E1210 00:31:49.037486 4884 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:31:49 crc kubenswrapper[4884]: E1210 00:31:49.037567 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-10 00:32:53.037541111 +0000 UTC m=+146.115498268 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.069670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.069747 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.069767 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.069797 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.069820 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:49Z","lastTransitionTime":"2025-12-10T00:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
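The projected-volume failures above all bottom out in object "ns"/"name" not registered: the kubelet's local object cache has not (re)synced kube-root-ca.crt and openshift-service-ca.crt for the namespace, a symptom of the broken API path rather than of deleted ConfigMaps. A small sketch that tallies the distinct missing objects across a log like this one (the input file name is hypothetical):

```python
# Sketch: tally distinct 'object "ns"/"name" not registered' failures in a
# kubelet log. The input file name is hypothetical.
import re
from collections import Counter

PAT = re.compile(r'object "([^"]+)"/"([^"]+)" not registered')

counts: Counter = Counter()
with open("kubelet.log", encoding="utf-8", errors="replace") as fh:
    for line in fh:
        counts.update(PAT.findall(line))

for (ns, name), n in counts.most_common():
    print(f"{n:5d}  {ns}/{name}")
```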
Has your network provider started?"} Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.172784 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.172853 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.172878 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.172914 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.172938 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:49Z","lastTransitionTime":"2025-12-10T00:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.277036 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.277116 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.277133 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.277167 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.277193 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:49Z","lastTransitionTime":"2025-12-10T00:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.286417 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:49 crc kubenswrapper[4884]: E1210 00:31:49.286635 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.286725 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:49 crc kubenswrapper[4884]: E1210 00:31:49.286859 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.287067 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:49 crc kubenswrapper[4884]: E1210 00:31:49.287479 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.380813 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.380951 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.380969 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.380996 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.381022 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:49Z","lastTransitionTime":"2025-12-10T00:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.485036 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.485125 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.485145 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.485182 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.485208 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:49Z","lastTransitionTime":"2025-12-10T00:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.589313 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.589388 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.589406 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.589461 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.589482 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:49Z","lastTransitionTime":"2025-12-10T00:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.693046 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.693121 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.693140 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.693169 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.693187 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:49Z","lastTransitionTime":"2025-12-10T00:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.797982 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.798081 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.798116 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.798152 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.798175 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:49Z","lastTransitionTime":"2025-12-10T00:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.901092 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.901170 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.901195 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.901232 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:49 crc kubenswrapper[4884]: I1210 00:31:49.901261 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:49Z","lastTransitionTime":"2025-12-10T00:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.004162 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.004384 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.004409 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.004542 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.004568 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
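The five-line NodeHasSufficientMemory/.../NodeNotReady block repeats every ~100ms for as long as the node stays NotReady, which is most of this capture. When reading a log like this, collapsing that repetition to a per-second count makes the one-off entries stand out; a sketch against the klog header format used above (file name hypothetical):

```python
# Sketch: collapse the repeating "Node became not ready" records to one
# count per second. File name hypothetical; the regex matches the klog
# header format above, e.g. "I1210 00:31:49.798175".
import re
from collections import Counter

HDR = re.compile(r"[IWEF]\d{4} (\d{2}:\d{2}:\d{2})\.\d+")

per_second: Counter = Counter()
with open("kubelet.log", encoding="utf-8", errors="replace") as fh:
    for line in fh:
        if "Node became not ready" in line:
            m = HDR.search(line)
            if m:
                per_second[m.group(1)] += 1

for second, n in sorted(per_second.items()):
    print(f"{second}  x{n}")
```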
Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.108340 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.108410 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.108428 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.108493 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.108516 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.211223 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.211642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.211743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.211853 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.211961 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.286150 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:50 crc kubenswrapper[4884]: E1210 00:31:50.286940 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.287322 4884 scope.go:117] "RemoveContainer" containerID="a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.315762 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.315828 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.315852 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.315884 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.315907 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.386623 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.387095 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.387116 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.387144 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.387163 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: E1210 00:31:50.408579 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.413852 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.413977 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.414073 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.414198 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.414291 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: E1210 00:31:50.434172 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.441292 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.441579 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.441798 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.442071 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.442631 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: E1210 00:31:50.464064 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.469539 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.469614 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.469642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.469673 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.469697 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: E1210 00:31:50.490253 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.496084 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.496148 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.496171 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.496201 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.496221 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: E1210 00:31:50.518058 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:50Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:50 crc kubenswrapper[4884]: E1210 00:31:50.518290 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.520310 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.520355 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.520372 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.520394 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.520413 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.622997 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.623092 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.623149 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.623176 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.623193 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.726482 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.726546 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.726570 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.726600 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.726621 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.829342 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.829412 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.829469 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.829506 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.829532 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.932346 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.932387 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.932402 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.932426 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:50 crc kubenswrapper[4884]: I1210 00:31:50.932468 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:50Z","lastTransitionTime":"2025-12-10T00:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.037139 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.037241 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.037297 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.037326 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.037377 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:51Z","lastTransitionTime":"2025-12-10T00:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.141091 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.141151 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.141164 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.141186 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.141200 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:51Z","lastTransitionTime":"2025-12-10T00:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.245327 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.245391 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.245410 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.245465 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.245487 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:51Z","lastTransitionTime":"2025-12-10T00:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.286700 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.286746 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.286767 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:51 crc kubenswrapper[4884]: E1210 00:31:51.287148 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:51 crc kubenswrapper[4884]: E1210 00:31:51.287472 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:51 crc kubenswrapper[4884]: E1210 00:31:51.287530 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.302168 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.349417 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.349497 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.349508 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.349528 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.349540 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:51Z","lastTransitionTime":"2025-12-10T00:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.452769 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.452867 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.452895 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.452931 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.452954 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:51Z","lastTransitionTime":"2025-12-10T00:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.555615 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.555664 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.555675 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.555696 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.555714 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:51Z","lastTransitionTime":"2025-12-10T00:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.658405 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.658478 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.658492 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.658513 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.658529 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:51Z","lastTransitionTime":"2025-12-10T00:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.761955 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.762007 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.762017 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.762037 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.762051 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:51Z","lastTransitionTime":"2025-12-10T00:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.841712 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/2.log" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.844778 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209"} Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.845810 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.865019 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.865131 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.865157 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.865216 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.865240 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:51Z","lastTransitionTime":"2025-12-10T00:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.865625 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.878708 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:51Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.893737 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.907804 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.923231 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.950474 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:24Z\\\",\\\"message\\\":\\\"e, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:24.373214 6602 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF1210 00:31:24.372907 6602 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z]\\\\nI1210 00:31:24.373233 6602 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1210 00:31:24.373115 6602 services_controller.go:443] Built service openshift-kube-storage-version-migrator-operator/metrics LB cluster-wide configs for network=default: 
[]services.lbConfi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatus
es\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.963947 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.968601 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.968637 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.968651 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.968674 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.968689 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:51Z","lastTransitionTime":"2025-12-10T00:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.982008 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:51 crc kubenswrapper[4884]: I1210 00:31:51.997578 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:51Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.009314 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.028295 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.049018 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.062593 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.072206 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.072264 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.072277 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.072300 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.072315 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:52Z","lastTransitionTime":"2025-12-10T00:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.075479 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63
a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.089355 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"p
odIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.102639 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"054b2b71-4def-4818-96b4-4ba8fa596dbf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86f6f80c7bc3d5e5a08cd2a5433cc67788c108786fa8096cdc37d991d6003d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f957e72695eba257f69c29126275be8c12e89f821841ad5f673a635feda617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66f957e72695eba257f69c29126275be8c12e89f821841ad5f673a635feda617\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.116174 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.133275 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"2025-12-10T00:30:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c\\\\n2025-12-10T00:30:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c to /host/opt/cni/bin/\\\\n2025-12-10T00:30:51Z [verbose] multus-daemon started\\\\n2025-12-10T00:30:51Z [verbose] Readiness Indicator file check\\\\n2025-12-10T00:31:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.175028 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.175093 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.175108 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.175129 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.175141 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:52Z","lastTransitionTime":"2025-12-10T00:31:52Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.278763    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.278868    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.278895    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.278926    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.278946    4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:52Z","lastTransitionTime":"2025-12-10T00:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.286139    4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 10 00:31:52 crc kubenswrapper[4884]: E1210 00:31:52.286355    4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.382311    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.382386    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.382406    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.382471    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.382491    4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:52Z","lastTransitionTime":"2025-12-10T00:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.485695    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.485745    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.485754    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.485774    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.485787    4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:52Z","lastTransitionTime":"2025-12-10T00:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.590030    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.590095    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.590111    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.590141    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.590168    4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:52Z","lastTransitionTime":"2025-12-10T00:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.693785    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.693880    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.693912    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.693946    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.693970    4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:52Z","lastTransitionTime":"2025-12-10T00:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.796990    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.797060    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.797078    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.797107    4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.797125    4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:52Z","lastTransitionTime":"2025-12-10T00:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.851237    4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/3.log"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.853264    4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/2.log"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.857062    4884 generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209" exitCode=1
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.857125    4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209"}
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.857182    4884 scope.go:117] "RemoveContainer" containerID="a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.858281    4884 scope.go:117] "RemoveContainer" containerID="1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209"
Dec 10 00:31:52 crc kubenswrapper[4884]: E1210 00:31:52.858596    4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659"
Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.874992    4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.894151 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.900597 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.900682 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.900710 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.900746 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.900773 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:52Z","lastTransitionTime":"2025-12-10T00:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.911964 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.929489 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.944992 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"054b2b71-4def-4818-96b4-4ba8fa596dbf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86f6f80c7bc3d5e5a08cd2a5433cc67788c108786fa8096cdc37d991d6003d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f957e72695eba257f69c29126275be8c12e89f821841ad5f673a635feda617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66f957e72695eba257f69c29126275be8c12e89f821841ad5f673a635feda617\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.966351 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:52 crc kubenswrapper[4884]: I1210 00:31:52.987868 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"2025-12-10T00:30:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c\\\\n2025-12-10T00:30:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c to /host/opt/cni/bin/\\\\n2025-12-10T00:30:51Z [verbose] multus-daemon started\\\\n2025-12-10T00:30:51Z [verbose] Readiness Indicator file check\\\\n2025-12-10T00:31:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:52Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.003461 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.004006 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.004067 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.004086 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.004109 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.004122 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:53Z","lastTransitionTime":"2025-12-10T00:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.019696 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.034709 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 
00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.054543 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.073742 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.095608 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"s
tarted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":
\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.108901 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.108989 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.109014 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.109051 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.109079 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:53Z","lastTransitionTime":"2025-12-10T00:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.134989 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a09b3940b7152104fb05c3a2ac9e9582021101121710443a5319f677b0b61287\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:24Z\\\",\\\"message\\\":\\\"e, internalTrafficLocal:false, hasNodePort:false}}\\\\nI1210 00:31:24.373214 6602 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nF1210 00:31:24.372907 6602 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:24Z is after 2025-08-24T17:21:41Z]\\\\nI1210 00:31:24.373233 6602 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-apiserver/kube-apiserver-crc after 0 failed attempt(s)\\\\nI1210 00:31:24.373115 6602 services_controller.go:443] Built service openshift-kube-storage-version-migrator-operator/metrics LB cluster-wide configs for network=default: 
[]services.lbConfi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:23Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:52Z\\\",\\\"message\\\":\\\"] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:31:52.016957 6985 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 00:31:52.017260 6985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:31:52.018602 6985 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1210 00:31:52.018623 6985 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1210 00:31:52.018645 6985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 00:31:52.018671 6985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1210 00:31:52.018678 6985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1210 00:31:52.018724 6985 factory.go:656] Stopping watch factory\\\\nI1210 00:31:52.018744 6985 ovnkube.go:599] Stopped ovnkube\\\\nI1210 00:31:52.018781 6985 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 00:31:52.018814 6985 handler.go:208] Removed *v1.Node event handler 7\\\\nI1210 00:31:52.018822 6985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 00:31:52.018827 6985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 
00\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.179301 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.204100 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.212546 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.212596 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.212609 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.212635 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.212649 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:53Z","lastTransitionTime":"2025-12-10T00:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.227933 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.241581 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.286165 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.286265 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:53 crc kubenswrapper[4884]: E1210 00:31:53.286400 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.286546 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:53 crc kubenswrapper[4884]: E1210 00:31:53.286822 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:53 crc kubenswrapper[4884]: E1210 00:31:53.287194 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.307122 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.315593 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.315640 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.315657 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.315680 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.315697 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:53Z","lastTransitionTime":"2025-12-10T00:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.419104 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.419180 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.419199 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.419226 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.419243 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:53Z","lastTransitionTime":"2025-12-10T00:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.523242 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.523310 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.523328 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.523353 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.523372 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:53Z","lastTransitionTime":"2025-12-10T00:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.626667 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.626733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.626752 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.626779 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.626798 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:53Z","lastTransitionTime":"2025-12-10T00:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.730331 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.730405 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.730466 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.730503 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.730525 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:53Z","lastTransitionTime":"2025-12-10T00:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.834362 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.834454 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.834475 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.834501 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.834521 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:53Z","lastTransitionTime":"2025-12-10T00:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.864127 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/3.log" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.870378 4884 scope.go:117] "RemoveContainer" containerID="1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209" Dec 10 00:31:53 crc kubenswrapper[4884]: E1210 00:31:53.870679 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.917536 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"054b2b71-4def-4818-96b4-4ba8fa596dbf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86f6f80c7bc3d5e5a08cd2a5433cc67788c108786fa8096cdc37d991d6003d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f957e72695eba257f69c29126275be8c12e89f821841ad5f673a635feda617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66f957e72695eba257f69c29126275be8c12e89f821841ad5f673a635feda617\\\",\\\"exitCode\\\":0,\
\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.936108 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.937377 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.937497 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.937537 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.937572 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.937599 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:53Z","lastTransitionTime":"2025-12-10T00:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.953957 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"2025-12-10T00:30:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c\\\\n2025-12-10T00:30:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c to /host/opt/cni/bin/\\\\n2025-12-10T00:30:51Z [verbose] multus-daemon started\\\\n2025-12-10T00:30:51Z [verbose] Readiness Indicator file check\\\\n2025-12-10T00:31:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.973906 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:53 crc kubenswrapper[4884]: I1210 00:31:53.996641 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:53Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.014249 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.036228 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771
aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.041982 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.042031 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.042048 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.042069 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeNotReady" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.042081 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:54Z","lastTransitionTime":"2025-12-10T00:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.076185 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c500c92-8e7e-49b3-86e7-da528cb90217\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9733ccbaba989209badb1102eab9452f0d7928dd6428d5bb2202c3d82287c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fde2ec61c3c8887402ec16b0298d7f39f65a9f0c87ef10182296b57a78557788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0e14d544507c375e4ac580dd862428d2bc949396923b37ffd5b542ffa8a064\\\",\\\"image\\\":\\\"quay.io/openshi
ft-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://484e3a97dedf40d3b11b38d46480dab8cfa71bb15b2bd9f624efef19b39a4d27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f32e67e4f0a06af9e79943b87ad5499507233082994748fe5e84f389304f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90220bd853a970fedca4fce5e6394366f4c8ccfdf9397de2c0e8e7959491c067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90220bd853a970fedca4fce5e6394366f4c8ccfdf9397de2c0e8e7959491c067\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b99c92f4955eea98b72fd114d6b2cc32c28b86b650aa4933b9219a2e19597d1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5
646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b99c92f4955eea98b72fd114d6b2cc32c28b86b650aa4933b9219a2e19597d1e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0b9037069b841636a787d5adb68e596da9538cd303fd621a7d912273c4917600\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b9037069b841636a787d5adb68e596da9538cd303fd621a7d912273c4917600\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.098264 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.121407 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\"
,\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.148017 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.148053 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.148066 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.148083 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.148093 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:54Z","lastTransitionTime":"2025-12-10T00:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.154073 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:52Z\\\",\\\"message\\\":\\\"] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:31:52.016957 6985 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 00:31:52.017260 6985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:31:52.018602 6985 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1210 00:31:52.018623 6985 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1210 00:31:52.018645 6985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 00:31:52.018671 6985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1210 00:31:52.018678 6985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1210 00:31:52.018724 6985 factory.go:656] Stopping watch factory\\\\nI1210 00:31:52.018744 6985 ovnkube.go:599] Stopped ovnkube\\\\nI1210 00:31:52.018781 6985 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 00:31:52.018814 6985 handler.go:208] Removed *v1.Node event handler 7\\\\nI1210 00:31:52.018822 6985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 00:31:52.018827 6985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 00\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.166052 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.186382 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.201651 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.212858 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.224612 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.242253 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.250655 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.250730 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.250751 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.250788 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.250813 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:54Z","lastTransitionTime":"2025-12-10T00:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.261514 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.276655 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:54Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.286851 4884 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:54 crc kubenswrapper[4884]: E1210 00:31:54.287010 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.354797 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.354861 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.354880 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.354909 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.354927 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:54Z","lastTransitionTime":"2025-12-10T00:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.457568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.457635 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.457657 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.457687 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.457708 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:54Z","lastTransitionTime":"2025-12-10T00:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.561027 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.561101 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.561120 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.561150 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.561171 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:54Z","lastTransitionTime":"2025-12-10T00:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.664012 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.664070 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.664093 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.664123 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.664145 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:54Z","lastTransitionTime":"2025-12-10T00:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.766642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.766715 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.766734 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.766763 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.766781 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:54Z","lastTransitionTime":"2025-12-10T00:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.871048 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.871232 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.871362 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.871404 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.871428 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:54Z","lastTransitionTime":"2025-12-10T00:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.974289 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.974358 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.974377 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.974403 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:54 crc kubenswrapper[4884]: I1210 00:31:54.974420 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:54Z","lastTransitionTime":"2025-12-10T00:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.077062 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.077120 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.077137 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.077161 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.077183 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:55Z","lastTransitionTime":"2025-12-10T00:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.180540 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.180590 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.180603 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.180622 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.180634 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:55Z","lastTransitionTime":"2025-12-10T00:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.283351 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.283905 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.283929 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.283963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.283988 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:55Z","lastTransitionTime":"2025-12-10T00:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.286400 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.286463 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.286497 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:55 crc kubenswrapper[4884]: E1210 00:31:55.286635 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:55 crc kubenswrapper[4884]: E1210 00:31:55.286809 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:55 crc kubenswrapper[4884]: E1210 00:31:55.287187 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.387424 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.387535 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.387554 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.387587 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.387606 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:55Z","lastTransitionTime":"2025-12-10T00:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.490521 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.490608 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.490637 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.490676 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.490701 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:55Z","lastTransitionTime":"2025-12-10T00:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.593591 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.593668 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.593694 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.593722 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.593739 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:55Z","lastTransitionTime":"2025-12-10T00:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.696858 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.696912 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.696924 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.696943 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.696963 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:55Z","lastTransitionTime":"2025-12-10T00:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.800787 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.800920 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.801003 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.801039 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.801066 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:55Z","lastTransitionTime":"2025-12-10T00:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.905570 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.905652 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.905677 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.905707 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:55 crc kubenswrapper[4884]: I1210 00:31:55.905736 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:55Z","lastTransitionTime":"2025-12-10T00:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.008839 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.008903 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.008921 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.008947 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.008968 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:56Z","lastTransitionTime":"2025-12-10T00:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.112966 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.113037 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.113055 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.113081 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.113099 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:56Z","lastTransitionTime":"2025-12-10T00:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.216081 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.216144 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.216162 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.216186 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.216212 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:56Z","lastTransitionTime":"2025-12-10T00:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.286300 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:56 crc kubenswrapper[4884]: E1210 00:31:56.286568 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.319682 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.319749 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.319770 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.319802 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.319827 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:56Z","lastTransitionTime":"2025-12-10T00:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.423530 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.423593 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.423611 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.423638 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.423656 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:56Z","lastTransitionTime":"2025-12-10T00:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.528001 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.528079 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.528103 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.528134 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.528155 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:56Z","lastTransitionTime":"2025-12-10T00:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.631799 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.631845 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.631858 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.631878 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.631889 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:56Z","lastTransitionTime":"2025-12-10T00:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.734990 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.735052 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.735072 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.735102 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.735122 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:56Z","lastTransitionTime":"2025-12-10T00:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.839198 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.839269 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.839289 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.839317 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.839336 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:56Z","lastTransitionTime":"2025-12-10T00:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.941829 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.941898 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.941916 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.941944 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:56 crc kubenswrapper[4884]: I1210 00:31:56.941961 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:56Z","lastTransitionTime":"2025-12-10T00:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.044930 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.045002 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.045024 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.045052 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.045071 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:57Z","lastTransitionTime":"2025-12-10T00:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.148241 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.148307 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.148326 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.148353 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.148370 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:57Z","lastTransitionTime":"2025-12-10T00:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.252017 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.252076 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.252099 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.252127 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.252149 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:57Z","lastTransitionTime":"2025-12-10T00:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.286649 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.286789 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.287747 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:57 crc kubenswrapper[4884]: E1210 00:31:57.287920 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:57 crc kubenswrapper[4884]: E1210 00:31:57.288111 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:57 crc kubenswrapper[4884]: E1210 00:31:57.288407 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.308044 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.330292 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.355492 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.355568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.355585 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.355613 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.355632 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:57Z","lastTransitionTime":"2025-12-10T00:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.367365 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c500c92-8e7e-49b3-86e7-da528cb90217\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9733ccbaba989209badb1102eab9452f0d7928dd6428d5bb2202c3d82287c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fde2ec61c3c8887402ec16b0298d7f39f65a9f0c87ef10182296b57a78557788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0e14d544507c375e4ac580dd862428d2bc949396923b37ffd5b542ffa8a064\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://484e3a97dedf40d3b11b38d46480dab8cfa71bb15b2bd9f624efef19b39a4d27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f32e67e4f0a06af9e79943b87ad5499507233082994748fe5e84f389304f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90220bd853a970fedca4fce5e6394366f4c8ccfdf9397de2c0e8e7959491c067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90220bd853a970fedca4fce5e6394366f4c8ccfdf9397de2c0e8e7959491c067\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b99c92f4955eea98b72fd114d6b2cc32c28b86b650aa4933b9219a2e19597d1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b99c92f4955eea98b72fd114d6b2cc32c28b86b650aa4933b9219a2e19597d1e\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0b9037069b841636a787d5adb68e596da9538cd303fd621a7d912273c4917600\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b9037069b841636a787d5adb68e596da9538cd303fd621a7d912273c4917600\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.390178 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.413274 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.437488 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.455037 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers 
with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.458537 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.458586 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.458603 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.458629 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.458647 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:57Z","lastTransitionTime":"2025-12-10T00:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.478290 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.498069 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.517259 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.538962 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.561971 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.562094 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:57 crc 
kubenswrapper[4884]: I1210 00:31:57.562115 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.562145 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.562171 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:57Z","lastTransitionTime":"2025-12-10T00:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.576398 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f49cfed7fd755c619feda112b18be52e20ca0f0
e83c0f45f762cffa17d17209\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:52Z\\\",\\\"message\\\":\\\"] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:31:52.016957 6985 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 00:31:52.017260 6985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:31:52.018602 6985 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1210 00:31:52.018623 6985 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1210 00:31:52.018645 6985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 00:31:52.018671 6985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1210 00:31:52.018678 6985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1210 00:31:52.018724 6985 factory.go:656] Stopping watch factory\\\\nI1210 00:31:52.018744 6985 ovnkube.go:599] Stopped ovnkube\\\\nI1210 00:31:52.018781 6985 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 00:31:52.018814 6985 handler.go:208] Removed *v1.Node event handler 7\\\\nI1210 00:31:52.018822 6985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 00:31:52.018827 6985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 00\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.597898 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.619408 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.639351 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.659178 4884 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.665360 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.665420 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.665462 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.665491 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.665510 4884 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:57Z","lastTransitionTime":"2025-12-10T00:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.677403 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"054b2b71-4def-4818-96b4-4ba8fa596dbf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86f6f80c7bc3d5e5a08cd2a5433cc67788c108786fa8096cdc37d991d6003d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f957e72695eba257f69c29126275be8c12e89f821841ad5f673a635feda617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66f957e72695eba257f69c29126275be8c12e89f821841ad5f673a635feda617\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.706005 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.729005 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"2025-12-10T00:30:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c\\\\n2025-12-10T00:30:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c to /host/opt/cni/bin/\\\\n2025-12-10T00:30:51Z [verbose] multus-daemon started\\\\n2025-12-10T00:30:51Z [verbose] Readiness Indicator file check\\\\n2025-12-10T00:31:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:31:57Z is after 2025-08-24T17:21:41Z" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.793069 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.793177 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.793197 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.793225 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.793242 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:57Z","lastTransitionTime":"2025-12-10T00:31:57Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.895619 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.895681 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.895701 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.895727 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.895746 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:57Z","lastTransitionTime":"2025-12-10T00:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.999261 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.999354 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.999373 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.999403 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:57 crc kubenswrapper[4884]: I1210 00:31:57.999422 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:57Z","lastTransitionTime":"2025-12-10T00:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.103201 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.103274 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.103293 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.103324 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.103345 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:58Z","lastTransitionTime":"2025-12-10T00:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.206170 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.206247 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.206272 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.206304 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.206325 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:58Z","lastTransitionTime":"2025-12-10T00:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.286749 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:31:58 crc kubenswrapper[4884]: E1210 00:31:58.287230 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.309584 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.309653 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.309672 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.309701 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.309724 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:58Z","lastTransitionTime":"2025-12-10T00:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.413730 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.413828 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.413859 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.413894 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.413926 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:58Z","lastTransitionTime":"2025-12-10T00:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.517317 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.517374 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.517386 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.517412 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.517426 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:58Z","lastTransitionTime":"2025-12-10T00:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.621302 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.621404 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.621423 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.621473 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.621492 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:58Z","lastTransitionTime":"2025-12-10T00:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.724882 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.724943 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.724954 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.724980 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.724997 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:58Z","lastTransitionTime":"2025-12-10T00:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.830346 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.830425 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.830462 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.830486 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.830499 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:58Z","lastTransitionTime":"2025-12-10T00:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.934552 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.934619 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.934637 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.934666 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:58 crc kubenswrapper[4884]: I1210 00:31:58.934686 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:58Z","lastTransitionTime":"2025-12-10T00:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.037892 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.037943 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.037961 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.037985 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.038005 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:59Z","lastTransitionTime":"2025-12-10T00:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.141736 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.141804 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.141823 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.141849 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.141868 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:59Z","lastTransitionTime":"2025-12-10T00:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.245344 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.245397 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.245418 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.245476 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.245495 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:59Z","lastTransitionTime":"2025-12-10T00:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.286288 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:31:59 crc kubenswrapper[4884]: E1210 00:31:59.286536 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.286748 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:31:59 crc kubenswrapper[4884]: E1210 00:31:59.286962 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.287045 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:31:59 crc kubenswrapper[4884]: E1210 00:31:59.287247 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.348518 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.348584 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.348602 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.348629 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.348646 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:59Z","lastTransitionTime":"2025-12-10T00:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.453065 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.453143 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.453165 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.453248 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.453275 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:59Z","lastTransitionTime":"2025-12-10T00:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.556669 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.556735 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.556756 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.556784 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.556808 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:59Z","lastTransitionTime":"2025-12-10T00:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.660225 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.660350 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.660404 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.660486 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.660509 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:59Z","lastTransitionTime":"2025-12-10T00:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.765490 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.765576 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.765625 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.765662 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.765687 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:59Z","lastTransitionTime":"2025-12-10T00:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.869425 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.869531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.869548 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.869574 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.869594 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:59Z","lastTransitionTime":"2025-12-10T00:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.972665 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.972732 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.972750 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.972776 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:31:59 crc kubenswrapper[4884]: I1210 00:31:59.972795 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:31:59Z","lastTransitionTime":"2025-12-10T00:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.075996 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.076069 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.076091 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.076119 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.076138 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.179159 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.179191 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.179201 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.179216 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.179225 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.282473 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.282527 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.282541 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.282560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.282572 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.287038 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:00 crc kubenswrapper[4884]: E1210 00:32:00.287213 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.386303 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.386349 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.386362 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.386383 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.386398 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.489118 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.489178 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.489196 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.489223 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.489241 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.591754 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.591820 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.591837 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.591866 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.591888 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.614562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.614606 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.614624 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.614647 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.614664 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:00 crc kubenswrapper[4884]: E1210 00:32:00.636685 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:00Z is after 2025-08-24T17:21:41Z"
Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.642513 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.642573 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.642592 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.642616 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.642645 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:00 crc kubenswrapper[4884]: E1210 00:32:00.663896 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:00Z is after 2025-08-24T17:21:41Z"
Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.670354 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.670400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.670417 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.670468 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.670486 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:00 crc kubenswrapper[4884]: E1210 00:32:00.692629 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.698698 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.698749 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.698767 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.698788 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.698807 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:00 crc kubenswrapper[4884]: E1210 00:32:00.720738 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.725719 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.725783 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.725801 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.725831 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.725851 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:00 crc kubenswrapper[4884]: E1210 00:32:00.749278 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:00Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:00Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:00 crc kubenswrapper[4884]: E1210 00:32:00.749561 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.751885 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.751934 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.751950 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.752008 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.752026 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.855550 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.855607 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.855625 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.855650 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.855669 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.959341 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.959416 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.959473 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.959506 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:00 crc kubenswrapper[4884]: I1210 00:32:00.959529 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:00Z","lastTransitionTime":"2025-12-10T00:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.063011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.063068 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.063085 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.063108 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.063127 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:01Z","lastTransitionTime":"2025-12-10T00:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.166000 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.166053 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.166065 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.166084 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.166099 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:01Z","lastTransitionTime":"2025-12-10T00:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.269071 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.269136 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.269154 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.269184 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.269204 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:01Z","lastTransitionTime":"2025-12-10T00:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.286362 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.286453 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.286569 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:01 crc kubenswrapper[4884]: E1210 00:32:01.286684 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:01 crc kubenswrapper[4884]: E1210 00:32:01.286852 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:01 crc kubenswrapper[4884]: E1210 00:32:01.286945 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.371681 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.371749 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.371767 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.371795 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.371815 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:01Z","lastTransitionTime":"2025-12-10T00:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.476707 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.476815 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.476845 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.476886 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.476925 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:01Z","lastTransitionTime":"2025-12-10T00:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.581883 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.581941 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.581958 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.581983 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.582001 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:01Z","lastTransitionTime":"2025-12-10T00:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.685023 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.685085 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.685104 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.685131 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.685150 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:01Z","lastTransitionTime":"2025-12-10T00:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.788744 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.788815 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.788829 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.788852 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.788864 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:01Z","lastTransitionTime":"2025-12-10T00:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.891790 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.891841 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.891855 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.891874 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.891887 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:01Z","lastTransitionTime":"2025-12-10T00:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.995238 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.995323 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.995348 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.995379 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:01 crc kubenswrapper[4884]: I1210 00:32:01.995407 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:01Z","lastTransitionTime":"2025-12-10T00:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.098700 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.098771 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.098799 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.098831 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.098855 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:02Z","lastTransitionTime":"2025-12-10T00:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.201835 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.201880 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.201891 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.202152 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.202186 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:02Z","lastTransitionTime":"2025-12-10T00:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.287169 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:02 crc kubenswrapper[4884]: E1210 00:32:02.287660 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.305348 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.305416 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.305460 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.305484 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.305504 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:02Z","lastTransitionTime":"2025-12-10T00:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.407603 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.407636 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.407646 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.407659 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.407669 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:02Z","lastTransitionTime":"2025-12-10T00:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.511021 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.511086 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.511110 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.511144 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.511168 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:02Z","lastTransitionTime":"2025-12-10T00:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.614064 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.614124 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.614136 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.614160 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.614181 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:02Z","lastTransitionTime":"2025-12-10T00:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.717254 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.717338 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.717351 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.717376 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.717394 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:02Z","lastTransitionTime":"2025-12-10T00:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.820235 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.820315 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.820336 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.820368 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.820389 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:02Z","lastTransitionTime":"2025-12-10T00:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.922806 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.922875 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.922896 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.922925 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:02 crc kubenswrapper[4884]: I1210 00:32:02.922944 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:02Z","lastTransitionTime":"2025-12-10T00:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.026062 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.026135 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.026158 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.026190 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.026214 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:03Z","lastTransitionTime":"2025-12-10T00:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.129716 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.129811 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.129850 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.129891 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.129915 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:03Z","lastTransitionTime":"2025-12-10T00:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.232089 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.232144 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.232156 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.232170 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.232197 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:03Z","lastTransitionTime":"2025-12-10T00:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.286132 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.286174 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.286151 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:03 crc kubenswrapper[4884]: E1210 00:32:03.286320 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:03 crc kubenswrapper[4884]: E1210 00:32:03.286512 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:03 crc kubenswrapper[4884]: E1210 00:32:03.286674 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.335815 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.335868 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.335887 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.335911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.335929 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:03Z","lastTransitionTime":"2025-12-10T00:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.439726 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.439841 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.439860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.440269 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.440641 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:03Z","lastTransitionTime":"2025-12-10T00:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.544841 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.544919 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.544937 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.544967 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.544986 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:03Z","lastTransitionTime":"2025-12-10T00:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.648159 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.648240 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.648270 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.648305 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.648329 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:03Z","lastTransitionTime":"2025-12-10T00:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.752354 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.752422 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.752473 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.752502 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.752526 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:03Z","lastTransitionTime":"2025-12-10T00:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.855883 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.855953 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.855971 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.855999 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.856020 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:03Z","lastTransitionTime":"2025-12-10T00:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.958844 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.958932 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.958960 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.958993 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:03 crc kubenswrapper[4884]: I1210 00:32:03.959016 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:03Z","lastTransitionTime":"2025-12-10T00:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.063289 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.063833 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.064059 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.064278 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.064539 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:04Z","lastTransitionTime":"2025-12-10T00:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.169167 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.169248 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.169268 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.169675 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.169885 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:04Z","lastTransitionTime":"2025-12-10T00:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.273798 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.273868 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.273885 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.273910 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.273929 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:04Z","lastTransitionTime":"2025-12-10T00:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.287121 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:04 crc kubenswrapper[4884]: E1210 00:32:04.287527 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.377748 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.377820 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.377838 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.377864 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.377884 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:04Z","lastTransitionTime":"2025-12-10T00:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.480967 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.481048 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.481070 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.481098 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.481118 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:04Z","lastTransitionTime":"2025-12-10T00:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.584191 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.584299 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.584316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.584340 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.584355 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:04Z","lastTransitionTime":"2025-12-10T00:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.687566 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.687637 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.687656 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.687685 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.687703 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:04Z","lastTransitionTime":"2025-12-10T00:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.791427 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.791600 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.791627 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.791666 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.791698 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:04Z","lastTransitionTime":"2025-12-10T00:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.895206 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.895307 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.895325 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.895356 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.895379 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:04Z","lastTransitionTime":"2025-12-10T00:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.998651 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.998730 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.998747 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.998776 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:04 crc kubenswrapper[4884]: I1210 00:32:04.998799 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:04Z","lastTransitionTime":"2025-12-10T00:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.102519 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.102596 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.102618 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.102646 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.102670 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:05Z","lastTransitionTime":"2025-12-10T00:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.207290 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.207365 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.207386 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.207419 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.207473 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:05Z","lastTransitionTime":"2025-12-10T00:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.286729 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.286772 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.286871 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:05 crc kubenswrapper[4884]: E1210 00:32:05.286953 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:05 crc kubenswrapper[4884]: E1210 00:32:05.287323 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:05 crc kubenswrapper[4884]: E1210 00:32:05.287179 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.310750 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.310813 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.310831 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.310858 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.310876 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:05Z","lastTransitionTime":"2025-12-10T00:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.414155 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.414219 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.414240 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.414268 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.414287 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:05Z","lastTransitionTime":"2025-12-10T00:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.517200 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.517256 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.517273 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.517298 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.517316 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:05Z","lastTransitionTime":"2025-12-10T00:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.620188 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.620244 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.620260 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.620285 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.620302 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:05Z","lastTransitionTime":"2025-12-10T00:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.723594 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.723686 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.723707 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.723731 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.723749 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:05Z","lastTransitionTime":"2025-12-10T00:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.827810 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.827906 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.827953 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.827985 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.828011 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:05Z","lastTransitionTime":"2025-12-10T00:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.931043 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.931109 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.931140 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.931167 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:05 crc kubenswrapper[4884]: I1210 00:32:05.931190 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:05Z","lastTransitionTime":"2025-12-10T00:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.034508 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.034575 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.034594 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.034622 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.034646 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:06Z","lastTransitionTime":"2025-12-10T00:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.138017 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.138093 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.138114 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.138140 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.138159 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:06Z","lastTransitionTime":"2025-12-10T00:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.241285 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.241346 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.241355 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.241375 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.241387 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:06Z","lastTransitionTime":"2025-12-10T00:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.287082 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:06 crc kubenswrapper[4884]: E1210 00:32:06.287282 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.344765 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.344831 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.344844 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.344868 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.344887 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:06Z","lastTransitionTime":"2025-12-10T00:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.448189 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.448260 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.448269 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.448288 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.448299 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:06Z","lastTransitionTime":"2025-12-10T00:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.551730 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.551812 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.551831 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.551864 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.551901 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:06Z","lastTransitionTime":"2025-12-10T00:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.655262 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.655354 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.655385 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.655420 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.655488 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:06Z","lastTransitionTime":"2025-12-10T00:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.758688 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.758813 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.758834 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.758863 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.758882 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:06Z","lastTransitionTime":"2025-12-10T00:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.867937 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.868011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.868033 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.868074 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.868100 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:06Z","lastTransitionTime":"2025-12-10T00:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.971810 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.971893 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.971920 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.971959 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:06 crc kubenswrapper[4884]: I1210 00:32:06.971984 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:06Z","lastTransitionTime":"2025-12-10T00:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.075297 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.075354 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.075391 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.075426 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.075490 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:07Z","lastTransitionTime":"2025-12-10T00:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.179066 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.179142 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.179167 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.179195 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.179214 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:07Z","lastTransitionTime":"2025-12-10T00:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.282932 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.283103 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.283141 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.283176 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.283200 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:07Z","lastTransitionTime":"2025-12-10T00:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.286391 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.286539 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.286412 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:07 crc kubenswrapper[4884]: E1210 00:32:07.286652 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:07 crc kubenswrapper[4884]: E1210 00:32:07.286782 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:07 crc kubenswrapper[4884]: E1210 00:32:07.286944 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.305522 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"054b2b71-4def-4818-96b4-4ba8fa596dbf\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86f6f80c7bc3d5e5a08cd2a5433cc67788c108786fa8096cdc37d991d6003d8d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66f957e72695eba257f69c29126275be8c12e89f821841ad5f673a635feda617\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66f957e72695eba257f69c29126275be8c12e89f821841ad5f673a635feda617\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.327584 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.346307 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rcj68" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0269081f-f135-4e66-91fd-a16277a00355\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:36Z\\\",\\\"message\\\":\\\"2025-12-10T00:30:51+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c\\\\n2025-12-10T00:30:51+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_5d71b5e4-312b-450b-8730-f6c9d9a9b57c to /host/opt/cni/bin/\\\\n2025-12-10T00:30:51Z [verbose] multus-daemon started\\\\n2025-12-10T00:30:51Z [verbose] Readiness Indicator file check\\\\n2025-12-10T00:31:36Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22hrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rcj68\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.366509 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2ef3a487-4343-4722-a276-b4b20b975313\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e47937319426f2409e01489437b194ee9832f9b040bd86c7ee77a980b641c289\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0658e51da5aab87d26f0854420335df463cdafca7d72d2d4a1621eef1d0404e0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b715975902c2dc1fda1652aa275dfbcb1c7b8ad57a9756abc63e7fa7ff7ef263\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.386200 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.386247 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.386265 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.386289 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.386305 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:07Z","lastTransitionTime":"2025-12-10T00:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.399099 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c500c92-8e7e-49b3-86e7-da528cb90217\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2d9733ccbaba989209badb1102eab9452f0d7928dd6428d5bb2202c3d82287c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fde2ec61c3c8887402ec16b0298d7f39f65a9f0c87ef10182296b57a78557788\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0e14d544507c375e4ac580dd862428d2bc949396923b37ffd5b542ffa8a064\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://484e3a97dedf40d3b11b38d46480dab8cfa71bb15b2bd9f624efef19b39a4d27\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c6f32e67e4f0a06af9e79943b87ad5499507233082994748fe5e84f389304f13\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://90220bd853a970fedca4fce5e6394366f4c8ccfdf9397de2c0e8e7959491c067\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://90220bd853a970fedca4fce5e6394366f4c8ccfdf9397de2c0e8e7959491c067\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b99c92f4955eea98b72fd114d6b2cc32c28b86b650aa4933b9219a2e19597d1e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b99c92f4955eea98b72fd114d6b2cc32c28b86b650aa4933b9219a2e19597d1e\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://0b9037069b841636a787d5adb68e596da9538cd303fd621a7d912273c4917600\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0b9037069b841636a787d5adb68e596da9538cd303fd621a7d912273c4917600\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.415080 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.434610 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a21ea9658056231a6308536cac081ae4ce5377e95be234e38aff71358c8fe6fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.453770 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.472697 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3f9331b2-7b49-4204-b48a-1ae5e4f4a8bd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://22d35d3e75958ea439b01e611b08b3343a9bebb23f281174809ef53bbe6d2e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3b44fc392e17cb2a2b675e28830b8a8708f58f9865dc58ef496cdde609296691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:31:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dpcw8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:03Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-bwm7p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 
00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.489604 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.489674 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.489690 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.489716 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.489731 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:07Z","lastTransitionTime":"2025-12-10T00:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.497965 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b93f82f-1fc6-48da-9c2b-12b05ead2f08\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-10T00:30:45Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1210 00:30:39.974411 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1210 00:30:39.977883 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-4210716759/tls.crt::/tmp/serving-cert-4210716759/tls.key\\\\\\\"\\\\nI1210 00:30:45.289143 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1210 00:30:45.291671 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1210 00:30:45.291691 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1210 00:30:45.291716 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1210 00:30:45.291721 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1210 00:30:45.296310 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1210 00:30:45.296342 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296348 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1210 00:30:45.296352 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1210 00:30:45.296353 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1210 
00:30:45.296355 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1210 00:30:45.296376 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1210 00:30:45.296402 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1210 00:30:45.299369 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.525743 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:46Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f6b8a4f7d23f5bae30a72379bddf313e6f37b35ed53e7d7a2bb1b24bdf860295\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4760dac5897705424a2c883125f007a71524b66aafe0ded1c3006635577c0121\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.544900 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pxpwg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"86246072-0dd6-41fb-878f-715a35fd98ce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://faf3a9fd148c027f6f385c0956bcf664dd093c2fb1778f4bea508576ae8f0635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lvb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pxpwg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.563495 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1cf4d2f3-df88-48d0-a8f1-19e1fee3fae7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a2a4f55cb2b4b0cad4e66ff7feb51d85b5b5098c13220245d5d89cf3d77c8284\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ee5d152846d8324097beb386711558e62f59412a70aa76250f5af7572510790\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d91298c4cd4ea5827ffc3257ed2f38a657491d697d798da25007b083ee397006\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://89413ec517265d25505fec4616c26f5c5e4e24a893c12cfcb4764b9eb1c02e10\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0ed1160fba01ef72bc660db2c61c3f00671719a501abbb17095152da040ac072\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1951199a15ffe5d81596b7a83ec5153cab9b3ab30c6bdcae3178cfa72d72ca34\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:54Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e201134cf35dd97e7ce7f7872e70bd3d8daee3a737349d8f16bb9a2890ec6bad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:55Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rchwx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-bc4r6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.592316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.592611 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:07 crc 
kubenswrapper[4884]: I1210 00:32:07.592628 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.592651 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.592669 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:07Z","lastTransitionTime":"2025-12-10T00:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.598088 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7022e894-7a34-4a84-8b18-e4440e11e659\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1f49cfed7fd755c619feda112b18be52e20ca0f0
e83c0f45f762cffa17d17209\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-10T00:31:52Z\\\",\\\"message\\\":\\\"] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:31:52.016957 6985 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1210 00:31:52.017260 6985 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1210 00:31:52.018602 6985 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1210 00:31:52.018623 6985 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1210 00:31:52.018645 6985 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1210 00:31:52.018671 6985 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1210 00:31:52.018678 6985 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1210 00:31:52.018724 6985 factory.go:656] Stopping watch factory\\\\nI1210 00:31:52.018744 6985 ovnkube.go:599] Stopped ovnkube\\\\nI1210 00:31:52.018781 6985 handler.go:208] Removed *v1.Node event handler 2\\\\nI1210 00:31:52.018814 6985 handler.go:208] Removed *v1.Node event handler 7\\\\nI1210 00:31:52.018822 6985 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1210 00:31:52.018827 6985 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1210 00\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-10T00:31:51Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c9hvx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-g8w62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.617483 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ec324800-e820-40c0-8b51-b020075f09eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:05Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dwpg9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:31:05Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-ndwnl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.633907 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b7061feb-ad2d-4df2-a447-1ef2a96384de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:31:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://db5012aec8bb0d2d1d2c3b2a18ade9650b884f68483711ae3ceaf415c9ecdd6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f83c84fd006dcf0a1fa1814b1a5ece978d85bb179cf53994c25033a51cc6822c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aafd8d6e43cd48c8295e4b16eaad70828e8b9958dd2a3692e7929bfa37d909b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a6e130c137a37a23ed53f533a3ccdcd631abe53595fd3b1e291c0c4fed9732b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-10T00:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-10T00:30:28Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:27Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.649695 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://50e12f7df8c4ce0e8b5395c150c4c60e03aee0d2ebb868ca2c115fca84a72b5b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 
00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.666826 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://29f6ff6b7483a6af65656caf7c5334537da4652da16c32ce261d5b1ecaee3efd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6nqsm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:50Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-8zcgx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.680633 4884 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4dtwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae121743-2040-4f84-8bb7-4a04cff7bd31\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-10T00:30:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c621ae6c4a2f02b4aa708452f96f842b91a751e814fe6b3a0dda2b31a99b6ecf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-10T00:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7zlgc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-10T00:30:52Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4dtwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:07Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.696872 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.696940 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.696960 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.696989 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:07 crc 
kubenswrapper[4884]: I1210 00:32:07.697011 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:07Z","lastTransitionTime":"2025-12-10T00:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.800861 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.800939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.800963 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.800996 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.801020 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:07Z","lastTransitionTime":"2025-12-10T00:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.905140 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.905241 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.905257 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.905279 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:07 crc kubenswrapper[4884]: I1210 00:32:07.905295 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:07Z","lastTransitionTime":"2025-12-10T00:32:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.008888 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.008960 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.008981 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.009010 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.009029 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:08Z","lastTransitionTime":"2025-12-10T00:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.112016 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.112059 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.112069 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.112086 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.112096 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:08Z","lastTransitionTime":"2025-12-10T00:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.215033 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.215079 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.215088 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.215104 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.215114 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:08Z","lastTransitionTime":"2025-12-10T00:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
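Every "Failed to update status for pod" entry above shares a single root cause: the serving certificate behind the pod.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, long before the node's clock reading of 2025-12-10T00:32:07Z. The rejection comes from Go's standard x509 validity-window check during the webhook POST; the stdlib-only sketch below isolates that comparison, with both timestamps copied verbatim from the log (the NotBefore value is not in the log, so a one-year validity is assumed).

package main

import (
	"fmt"
	"time"
)

// checkValidityWindow isolates the NotBefore/NotAfter comparison that
// produces "x509: certificate has expired or is not yet valid". In real
// verification the same check runs inside crypto/x509 during Verify; this
// sketch only reproduces the time arithmetic.
func checkValidityWindow(now, notBefore, notAfter time.Time) error {
	if now.Before(notBefore) {
		return fmt.Errorf("certificate is not yet valid: current time %s is before %s",
			now.Format(time.RFC3339), notBefore.Format(time.RFC3339))
	}
	if now.After(notAfter) {
		return fmt.Errorf("certificate has expired: current time %s is after %s",
			now.Format(time.RFC3339), notAfter.Format(time.RFC3339))
	}
	return nil
}

func main() {
	// Both timestamps are copied verbatim from the kubelet errors above.
	now, _ := time.Parse(time.RFC3339, "2025-12-10T00:32:07Z")
	notAfter, _ := time.Parse(time.RFC3339, "2025-08-24T17:21:41Z")
	// NotBefore is not reported in the log; one year of validity is assumed.
	notBefore := notAfter.AddDate(-1, 0, 0)
	fmt.Println(checkValidityWindow(now, notBefore, notAfter))
}

In the real kubelet this error surfaces out of the TLS handshake of the webhook call, not from hand-rolled code; the sketch only shows why a node whose clock jumped months past the certificate's NotAfter fails every status patch the same way.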
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.286825 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 10 00:32:08 crc kubenswrapper[4884]: E1210 00:32:08.287029 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.288326 4884 scope.go:117] "RemoveContainer" containerID="1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209"
Dec 10 00:32:08 crc kubenswrapper[4884]: E1210 00:32:08.288660 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.318787 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.318847 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.318865 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.318892 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.318913 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:08Z","lastTransitionTime":"2025-12-10T00:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
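The ovnkube-controller container above is in CrashLoopBackOff with restartCount 3 and a quoted delay of "back-off 40s". That is what a doubling restart backoff produces: kubelet's documented behavior starts at 10s and doubles per restart up to a 5m cap, so the series runs 10s, 20s, 40s, 1m20s, and so on. A sketch under those assumed constants (the base and cap are kubelet defaults, not values read from this log):

package main

import (
	"fmt"
	"time"
)

// crashLoopDelay models kubelet's container restart backoff as a plain
// doubling series: base, 2*base, 4*base, ... capped at max. The 10s base
// and 5m cap are kubelet's documented defaults, assumed here.
func crashLoopDelay(restarts int, base, max time.Duration) time.Duration {
	d := base
	for i := 0; i < restarts; i++ {
		d *= 2
		if d >= max {
			return max
		}
	}
	return d
}

func main() {
	for r := 0; r <= 6; r++ {
		fmt.Printf("after %d restarts -> back-off %s\n",
			r, crashLoopDelay(r, 10*time.Second, 5*time.Minute))
	}
	// The "back-off 40s" quoted above is the 10s -> 20s -> 40s step of
	// this series; the exact indexing against restartCount is an
	// implementation detail inside kubelet.
}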
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.421777 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.421822 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.421834 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.421849 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.421859 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:08Z","lastTransitionTime":"2025-12-10T00:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.525221 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.525273 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.525293 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.525317 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.525336 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:08Z","lastTransitionTime":"2025-12-10T00:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.629253 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.629338 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.629363 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.629398 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.629423 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:08Z","lastTransitionTime":"2025-12-10T00:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
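The setters.go:603 entries repeating above embed the node's Ready condition as inline JSON. A local struct mirroring exactly the fields visible in those entries (the real type is NodeCondition from k8s.io/api/core/v1, deliberately not imported here) round-trips that payload:

package main

import (
	"encoding/json"
	"fmt"
)

// nodeCondition mirrors the fields printed by the "Node became not ready"
// entries above; field names and JSON tags match the logged payload.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Values copied from the condition={...} payloads logged above.
	c := nodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  "2025-12-10T00:32:08Z",
		LastTransitionTime: "2025-12-10T00:32:08Z",
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
			"Has your network provider started?",
	}
	b, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // matches the condition={...} payload logged above
}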
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.732911 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.732973 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.732992 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.733013 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.733029 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:08Z","lastTransitionTime":"2025-12-10T00:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.836375 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.836475 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.836496 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.836528 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.836551 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:08Z","lastTransitionTime":"2025-12-10T00:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.940110 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.940175 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.940199 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.940231 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:08 crc kubenswrapper[4884]: I1210 00:32:08.940253 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:08Z","lastTransitionTime":"2025-12-10T00:32:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
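Each kubenswrapper payload above begins with a klog header such as I1210 00:32:08.733029 4884 setters.go:603]: severity letter, month and day, wall-clock time with microseconds, PID, then source file and line. A small parser for that shape (the regexp is inferred from the lines above, not taken from klog itself, and tolerates the padded PID column klog sometimes emits):

package main

import (
	"fmt"
	"regexp"
)

// klogHeader splits a klog-formatted payload into its header fields:
// severity (I/W/E/F), MMDD date, HH:MM:SS.microseconds, PID, source
// file, line number, and the remaining message.
var klogHeader = regexp.MustCompile(
	`^([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d{6})\s+(\d+) ([^ :]+):(\d+)\] (.*)$`)

func main() {
	line := `I1210 00:32:08.733029 4884 setters.go:603] "Node became not ready" node="crc"`
	m := klogHeader.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("not a klog line")
		return
	}
	fmt.Printf("severity=%s date=%s time=%s pid=%s source=%s:%s\n  msg=%s\n",
		m[1], m[2], m[3], m[4], m[5], m[6], m[7])
}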
Has your network provider started?"} Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.043816 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.044232 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.044393 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.044646 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.044838 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:09Z","lastTransitionTime":"2025-12-10T00:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.148590 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.148665 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.148691 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.148722 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.148745 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:09Z","lastTransitionTime":"2025-12-10T00:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.198763 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:09 crc kubenswrapper[4884]: E1210 00:32:09.198957 4884 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:32:09 crc kubenswrapper[4884]: E1210 00:32:09.199042 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs podName:ec324800-e820-40c0-8b51-b020075f09eb nodeName:}" failed. No retries permitted until 2025-12-10 00:33:13.199015774 +0000 UTC m=+166.276972931 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs") pod "network-metrics-daemon-ndwnl" (UID: "ec324800-e820-40c0-8b51-b020075f09eb") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.252553 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.252604 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.252628 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.252658 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.252680 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:09Z","lastTransitionTime":"2025-12-10T00:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.286499 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.286553 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:09 crc kubenswrapper[4884]: E1210 00:32:09.286693 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.286718 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:09 crc kubenswrapper[4884]: E1210 00:32:09.286794 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:09 crc kubenswrapper[4884]: E1210 00:32:09.286909 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.355977 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.356075 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.356094 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.356124 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.356141 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:09Z","lastTransitionTime":"2025-12-10T00:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.459592 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.459665 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.459685 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.459713 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.459735 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:09Z","lastTransitionTime":"2025-12-10T00:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.562957 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.563040 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.563061 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.563095 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.563119 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:09Z","lastTransitionTime":"2025-12-10T00:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.666711 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.666778 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.666801 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.666832 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.666855 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:09Z","lastTransitionTime":"2025-12-10T00:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.770010 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.770090 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.770113 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.770146 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.770171 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:09Z","lastTransitionTime":"2025-12-10T00:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.875304 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.875381 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.875400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.875470 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.875500 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:09Z","lastTransitionTime":"2025-12-10T00:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.978838 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.978922 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.978970 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.979013 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:09 crc kubenswrapper[4884]: I1210 00:32:09.979041 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:09Z","lastTransitionTime":"2025-12-10T00:32:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.082402 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.082493 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.082511 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.082538 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.082557 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:10Z","lastTransitionTime":"2025-12-10T00:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.185786 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.185832 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.185844 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.185863 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.185877 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:10Z","lastTransitionTime":"2025-12-10T00:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.287138 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:10 crc kubenswrapper[4884]: E1210 00:32:10.287417 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.289771 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.289848 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.289865 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.289892 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.289914 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:10Z","lastTransitionTime":"2025-12-10T00:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.393335 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.393395 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.393414 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.393473 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.393492 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:10Z","lastTransitionTime":"2025-12-10T00:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.497736 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.497814 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.497833 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.497867 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.497888 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:10Z","lastTransitionTime":"2025-12-10T00:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.601254 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.601342 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.601368 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.601400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.601423 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:10Z","lastTransitionTime":"2025-12-10T00:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.705119 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.705270 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.705294 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.705326 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.705345 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:10Z","lastTransitionTime":"2025-12-10T00:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.809035 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.809146 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.809164 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.809188 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.809210 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:10Z","lastTransitionTime":"2025-12-10T00:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.912560 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.912626 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.912643 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.912666 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:10 crc kubenswrapper[4884]: I1210 00:32:10.912686 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:10Z","lastTransitionTime":"2025-12-10T00:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.008124 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.008218 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.008245 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.008277 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.008303 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:11 crc kubenswrapper[4884]: E1210 00:32:11.031782 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:11Z is after 2025-08-24T17:21:41Z"
Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.039681 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.039757 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.039777 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.039803 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.039824 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:11 crc kubenswrapper[4884]: E1210 00:32:11.062943 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:11Z is after 2025-08-24T17:21:41Z"
Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.070568 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.070624 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.070637 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.070660 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.070677 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:11 crc kubenswrapper[4884]: E1210 00:32:11.093313 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:11Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.098144 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.098207 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.098226 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.098252 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.098273 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:11 crc kubenswrapper[4884]: E1210 00:32:11.120681 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:11Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.126013 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.126077 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.126090 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.126111 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.126147 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:11 crc kubenswrapper[4884]: E1210 00:32:11.146725 4884 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-10T00:32:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"9e5a05e6-2f2c-47fe-b87f-7c17a55bede5\\\",\\\"systemUUID\\\":\\\"bb0ea269-6375-48fa-bc24-bfb5f5739a28\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-10T00:32:11Z is after 2025-08-24T17:21:41Z" Dec 10 00:32:11 crc kubenswrapper[4884]: E1210 00:32:11.146895 4884 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.149669 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.149739 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.149760 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.149787 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.149809 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.254882 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.254960 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.254979 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.255001 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.255019 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.286814 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.286899 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:11 crc kubenswrapper[4884]: E1210 00:32:11.287069 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.287125 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:11 crc kubenswrapper[4884]: E1210 00:32:11.287392 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:11 crc kubenswrapper[4884]: E1210 00:32:11.287669 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.358451 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.358519 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.358537 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.358564 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.358583 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.461918 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.461999 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.462022 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.462053 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.462075 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.565660 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.565737 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.565753 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.565771 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.565784 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.670166 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.670236 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.670254 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.670283 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.670304 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.773359 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.773504 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.773525 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.773586 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.773604 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.877389 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.877447 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.877458 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.877474 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.877484 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.980597 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.980700 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.980715 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.980741 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:11 crc kubenswrapper[4884]: I1210 00:32:11.980759 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:11Z","lastTransitionTime":"2025-12-10T00:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.083846 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.083939 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.083952 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.083970 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.083986 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:12Z","lastTransitionTime":"2025-12-10T00:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.187562 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.187614 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.187632 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.187656 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.187674 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:12Z","lastTransitionTime":"2025-12-10T00:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.286730 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:12 crc kubenswrapper[4884]: E1210 00:32:12.286895 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.291049 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.291105 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.291124 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.291146 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.291162 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:12Z","lastTransitionTime":"2025-12-10T00:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.394039 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.394156 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.394179 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.394211 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.394237 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:12Z","lastTransitionTime":"2025-12-10T00:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.497490 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.497557 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.497574 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.497598 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.497615 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:12Z","lastTransitionTime":"2025-12-10T00:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.601664 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.601736 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.601756 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.601780 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.601797 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:12Z","lastTransitionTime":"2025-12-10T00:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.705164 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.705220 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.705240 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.705268 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.705288 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:12Z","lastTransitionTime":"2025-12-10T00:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.809691 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.809791 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.809817 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.809850 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.809873 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:12Z","lastTransitionTime":"2025-12-10T00:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.914233 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.914326 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.914374 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.914402 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:12 crc kubenswrapper[4884]: I1210 00:32:12.914469 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:12Z","lastTransitionTime":"2025-12-10T00:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.018297 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.018506 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.018528 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.018593 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.018612 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:13Z","lastTransitionTime":"2025-12-10T00:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.123121 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.123235 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.123301 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.123335 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.123393 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:13Z","lastTransitionTime":"2025-12-10T00:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.226745 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.226808 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.226825 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.226849 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.226868 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:13Z","lastTransitionTime":"2025-12-10T00:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.286848 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:13 crc kubenswrapper[4884]: E1210 00:32:13.287020 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.287105 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:13 crc kubenswrapper[4884]: E1210 00:32:13.287201 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.287584 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:13 crc kubenswrapper[4884]: E1210 00:32:13.287683 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.329736 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.329795 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.329814 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.329839 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.329856 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:13Z","lastTransitionTime":"2025-12-10T00:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.432886 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.432955 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.432974 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.433002 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.433021 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:13Z","lastTransitionTime":"2025-12-10T00:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.536012 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.536079 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.536096 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.536129 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.536150 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:13Z","lastTransitionTime":"2025-12-10T00:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.638965 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.639039 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.639061 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.639085 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.639104 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:13Z","lastTransitionTime":"2025-12-10T00:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.742306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.742391 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.742414 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.742478 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.742498 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:13Z","lastTransitionTime":"2025-12-10T00:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.845685 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.845735 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.845745 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.845761 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.845775 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:13Z","lastTransitionTime":"2025-12-10T00:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.947670 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.947707 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.947717 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.947733 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:13 crc kubenswrapper[4884]: I1210 00:32:13.947743 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:13Z","lastTransitionTime":"2025-12-10T00:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.051095 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.051532 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.051542 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.051558 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.051568 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:14Z","lastTransitionTime":"2025-12-10T00:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.154149 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.154208 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.154225 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.154250 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.154267 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:14Z","lastTransitionTime":"2025-12-10T00:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.258552 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.258642 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.258661 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.258691 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.258711 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:14Z","lastTransitionTime":"2025-12-10T00:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.287026 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:14 crc kubenswrapper[4884]: E1210 00:32:14.287303 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.362011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.362126 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.362182 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.362218 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.362238 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:14Z","lastTransitionTime":"2025-12-10T00:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.466318 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.466400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.466427 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.466502 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.466528 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:14Z","lastTransitionTime":"2025-12-10T00:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.570404 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.570516 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.570547 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.570580 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.570600 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:14Z","lastTransitionTime":"2025-12-10T00:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.674995 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.675067 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.675086 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.675113 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.675131 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:14Z","lastTransitionTime":"2025-12-10T00:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.779374 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.779485 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.779507 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.779535 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.779553 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:14Z","lastTransitionTime":"2025-12-10T00:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.883732 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.883864 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.883883 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.883957 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.883981 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:14Z","lastTransitionTime":"2025-12-10T00:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.987164 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.987261 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.987288 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.987316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:14 crc kubenswrapper[4884]: I1210 00:32:14.987338 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:14Z","lastTransitionTime":"2025-12-10T00:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.090728 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.090780 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.090791 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.090808 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.090822 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:15Z","lastTransitionTime":"2025-12-10T00:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.195116 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.195195 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.195210 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.195229 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.195245 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:15Z","lastTransitionTime":"2025-12-10T00:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.286957 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.287117 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:15 crc kubenswrapper[4884]: E1210 00:32:15.287152 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:15 crc kubenswrapper[4884]: E1210 00:32:15.287579 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.288350 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:15 crc kubenswrapper[4884]: E1210 00:32:15.288649 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
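Every "Error syncing pod, skipping" entry above traces back to the same root condition: kubelet finds no CNI configuration file in /etc/kubernetes/cni/net.d/, so no pod sandbox can get a network. A minimal sketch of the kind of directory check that error message implies, assuming the .conf/.conflist/.json extensions CNI loaders conventionally scan for; this is illustrative, not kubelet's actual loader code:

```go
// Sketch: list the CNI conf directory named in the error and report whether
// any candidate configuration file exists yet.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	const confDir = "/etc/kubernetes/cni/net.d" // path taken from the log message above
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	var found []string
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // conventional CNI config extensions (assumption)
			found = append(found, e.Name())
		}
	}
	if len(found) == 0 {
		fmt.Println("no CNI configuration file found; the network plugin has not written one yet")
		return
	}
	fmt.Println("CNI configuration files:", found)
}
```

Until the network provider drops a file into that directory, the NetworkReady=false heartbeat and the per-pod sandbox retries below keep recurring.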
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.298358 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.298460 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.298482 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.298510 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.298528 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:15Z","lastTransitionTime":"2025-12-10T00:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.402799 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.402857 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.402872 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.402895 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.402908 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:15Z","lastTransitionTime":"2025-12-10T00:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.507114 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.507201 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.507222 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.507250 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.507271 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:15Z","lastTransitionTime":"2025-12-10T00:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.611385 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.611467 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.611477 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.611494 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.611503 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:15Z","lastTransitionTime":"2025-12-10T00:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.714994 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.715060 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.715079 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.715104 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.715121 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:15Z","lastTransitionTime":"2025-12-10T00:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.819004 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.819095 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.819113 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.819141 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.819162 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:15Z","lastTransitionTime":"2025-12-10T00:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.922376 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.922479 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.922499 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.922523 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:15 crc kubenswrapper[4884]: I1210 00:32:15.922540 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:15Z","lastTransitionTime":"2025-12-10T00:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.026407 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.026554 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.026582 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.026617 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.026643 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:16Z","lastTransitionTime":"2025-12-10T00:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.129930 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.130020 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.130038 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.130067 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.130088 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:16Z","lastTransitionTime":"2025-12-10T00:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.233767 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.233860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.233880 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.233906 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.233926 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:16Z","lastTransitionTime":"2025-12-10T00:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.286275 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:16 crc kubenswrapper[4884]: E1210 00:32:16.286685 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
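setters.go logs the full Ready condition as inline JSON each time the status is recomputed. A short sketch that decodes one such condition payload; the struct below is a local stand-in for illustration (the real type lives in k8s.io/api/core/v1 as NodeCondition), and the sample input is abbreviated from the entries above:

```go
// Sketch: decode the condition={...} JSON printed by setters.go and report
// the reason and when the node last transitioned.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type nodeCondition struct {
	Type               string    `json:"type"`
	Status             string    `json:"status"`
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Reason             string    `json:"reason"`
	Message            string    `json:"message"`
}

func main() {
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:16Z","lastTransitionTime":"2025-12-10T00:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		fmt.Println("bad condition JSON:", err)
		return
	}
	fmt.Printf("%s=%s (%s) since %s\n", c.Type, c.Status, c.Reason, c.LastTransitionTime.Format(time.RFC3339))
}
```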
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.336403 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.336505 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.336523 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.336550 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.336568 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:16Z","lastTransitionTime":"2025-12-10T00:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.439835 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.439910 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.439928 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.439983 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.440002 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:16Z","lastTransitionTime":"2025-12-10T00:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.543476 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.543541 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.543555 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.543578 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.543594 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:16Z","lastTransitionTime":"2025-12-10T00:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.646956 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.647014 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.647031 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.647058 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.647080 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:16Z","lastTransitionTime":"2025-12-10T00:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.750372 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.750512 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.750536 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.750564 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.750582 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:16Z","lastTransitionTime":"2025-12-10T00:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.853345 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.853409 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.853429 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.853484 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.853502 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:16Z","lastTransitionTime":"2025-12-10T00:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.957807 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.957881 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.957898 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.957926 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:16 crc kubenswrapper[4884]: I1210 00:32:16.957948 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:16Z","lastTransitionTime":"2025-12-10T00:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.060975 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.061050 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.061067 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.061092 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.061113 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:17Z","lastTransitionTime":"2025-12-10T00:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.164487 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.164598 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.164619 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.164689 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.164709 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:17Z","lastTransitionTime":"2025-12-10T00:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.268778 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.268842 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.268868 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.268898 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.268916 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:17Z","lastTransitionTime":"2025-12-10T00:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.286693 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 10 00:32:17 crc kubenswrapper[4884]: E1210 00:32:17.286877 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.286932 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 10 00:32:17 crc kubenswrapper[4884]: E1210 00:32:17.287073 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.287380 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl"
Dec 10 00:32:17 crc kubenswrapper[4884]: E1210 00:32:17.287743 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.363891 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=92.363865814 podStartE2EDuration="1m32.363865814s" podCreationTimestamp="2025-12-10 00:30:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:17.336627129 +0000 UTC m=+110.414584316" watchObservedRunningTime="2025-12-10 00:32:17.363865814 +0000 UTC m=+110.441822961"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.376516 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.376595 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.376616 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.376643 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.376664 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:17Z","lastTransitionTime":"2025-12-10T00:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.416359 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-pxpwg" podStartSLOduration=88.416333356 podStartE2EDuration="1m28.416333356s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:17.387077293 +0000 UTC m=+110.465034440" watchObservedRunningTime="2025-12-10 00:32:17.416333356 +0000 UTC m=+110.494290503"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.468642 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-bc4r6" podStartSLOduration=88.468595571 podStartE2EDuration="1m28.468595571s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:17.418321853 +0000 UTC m=+110.496279020" watchObservedRunningTime="2025-12-10 00:32:17.468595571 +0000 UTC m=+110.546552708"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.479466 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.479555 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.479574 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.479601 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.479620 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:17Z","lastTransitionTime":"2025-12-10T00:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.530940 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=55.530904737 podStartE2EDuration="55.530904737s" podCreationTimestamp="2025-12-10 00:31:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:17.5088018 +0000 UTC m=+110.586758957" watchObservedRunningTime="2025-12-10 00:32:17.530904737 +0000 UTC m=+110.608861894"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.549701 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podStartSLOduration=88.549677657 podStartE2EDuration="1m28.549677657s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:17.549260555 +0000 UTC m=+110.627217742" watchObservedRunningTime="2025-12-10 00:32:17.549677657 +0000 UTC m=+110.627634794"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.565947 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-4dtwx" podStartSLOduration=88.565929235 podStartE2EDuration="1m28.565929235s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:17.565641237 +0000 UTC m=+110.643598444" watchObservedRunningTime="2025-12-10 00:32:17.565929235 +0000 UTC m=+110.643886352"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.581081 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=26.581058682 podStartE2EDuration="26.581058682s" podCreationTimestamp="2025-12-10 00:31:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:17.580933087 +0000 UTC m=+110.658890244" watchObservedRunningTime="2025-12-10 00:32:17.581058682 +0000 UTC m=+110.659015829"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.582052 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.582091 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.582104 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.582119 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.582130 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:17Z","lastTransitionTime":"2025-12-10T00:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.623274 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-rcj68" podStartSLOduration=88.623256968 podStartE2EDuration="1m28.623256968s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:17.622515495 +0000 UTC m=+110.700472632" watchObservedRunningTime="2025-12-10 00:32:17.623256968 +0000 UTC m=+110.701214085"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.666119 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=88.666079411 podStartE2EDuration="1m28.666079411s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:17.640636538 +0000 UTC m=+110.718593665" watchObservedRunningTime="2025-12-10 00:32:17.666079411 +0000 UTC m=+110.744036528"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.666500 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=24.666491323 podStartE2EDuration="24.666491323s" podCreationTimestamp="2025-12-10 00:31:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:17.665147104 +0000 UTC m=+110.743104231" watchObservedRunningTime="2025-12-10 00:32:17.666491323 +0000 UTC m=+110.744448450"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.684295 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.684358 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.684370 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.684385 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.684397 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:17Z","lastTransitionTime":"2025-12-10T00:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.718685 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-bwm7p" podStartSLOduration=87.718666046 podStartE2EDuration="1m27.718666046s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:17.718559783 +0000 UTC m=+110.796516920" watchObservedRunningTime="2025-12-10 00:32:17.718666046 +0000 UTC m=+110.796623163"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.787367 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.787398 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.787408 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.787425 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.787455 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:17Z","lastTransitionTime":"2025-12-10T00:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.890245 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.890316 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.890354 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.890389 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.890411 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:17Z","lastTransitionTime":"2025-12-10T00:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.993709 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.993797 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.993830 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.993860 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:17 crc kubenswrapper[4884]: I1210 00:32:17.993884 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:17Z","lastTransitionTime":"2025-12-10T00:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.097506 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.097576 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.097601 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.097631 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.097654 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:18Z","lastTransitionTime":"2025-12-10T00:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.201743 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.201797 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.201814 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.201840 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.201857 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:18Z","lastTransitionTime":"2025-12-10T00:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.286301 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 10 00:32:18 crc kubenswrapper[4884]: E1210 00:32:18.286603 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.305146 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.305209 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.305226 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.305250 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.305269 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:18Z","lastTransitionTime":"2025-12-10T00:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.408886 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.408944 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.408961 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.408986 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.409005 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:18Z","lastTransitionTime":"2025-12-10T00:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.512117 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.512189 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.512215 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.512275 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.512299 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:18Z","lastTransitionTime":"2025-12-10T00:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.616048 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.616095 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.616113 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.616136 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.616153 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:18Z","lastTransitionTime":"2025-12-10T00:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.718924 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.718971 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.718990 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.719014 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.719031 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:18Z","lastTransitionTime":"2025-12-10T00:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.822102 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.822155 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.822174 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.822197 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.822215 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:18Z","lastTransitionTime":"2025-12-10T00:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.925883 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.925930 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.925942 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.925961 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:18 crc kubenswrapper[4884]: I1210 00:32:18.925973 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:18Z","lastTransitionTime":"2025-12-10T00:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.029306 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.029371 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.029390 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.029416 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.029558 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:19Z","lastTransitionTime":"2025-12-10T00:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.135028 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.135104 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.135130 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.135159 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.135184 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:19Z","lastTransitionTime":"2025-12-10T00:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.238781 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.238834 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.238854 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.238876 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.238894 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:19Z","lastTransitionTime":"2025-12-10T00:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.286794 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 10 00:32:19 crc kubenswrapper[4884]: E1210 00:32:19.287031 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.286807 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.287099 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 10 00:32:19 crc kubenswrapper[4884]: E1210 00:32:19.287677 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb"
Dec 10 00:32:19 crc kubenswrapper[4884]: E1210 00:32:19.287851 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.341604 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.341681 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.341708 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.341734 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.341756 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:19Z","lastTransitionTime":"2025-12-10T00:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.444382 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.444498 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.444531 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.444557 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.444575 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:19Z","lastTransitionTime":"2025-12-10T00:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.547460 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.547530 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.547566 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.547601 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.547624 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:19Z","lastTransitionTime":"2025-12-10T00:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.650713 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.650778 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.650817 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.650842 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.650864 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:19Z","lastTransitionTime":"2025-12-10T00:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.753885 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.753972 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.753996 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.754024 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.754045 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:19Z","lastTransitionTime":"2025-12-10T00:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.857612 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.857685 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.857709 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.857739 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.857765 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:19Z","lastTransitionTime":"2025-12-10T00:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.960870 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.960943 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.960972 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.960999 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:19 crc kubenswrapper[4884]: I1210 00:32:19.961021 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:19Z","lastTransitionTime":"2025-12-10T00:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.064623 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.064704 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.064723 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.064750 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.064770 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:20Z","lastTransitionTime":"2025-12-10T00:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.168310 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.168394 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.168418 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.168475 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.168496 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:20Z","lastTransitionTime":"2025-12-10T00:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.272072 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.272159 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.272187 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.272216 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.272234 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:20Z","lastTransitionTime":"2025-12-10T00:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.286497 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 10 00:32:20 crc kubenswrapper[4884]: E1210 00:32:20.286693 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.375489 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.375587 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.375599 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.375623 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.375640 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:20Z","lastTransitionTime":"2025-12-10T00:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.478782 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.478841 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.478858 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.478883 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.478901 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:20Z","lastTransitionTime":"2025-12-10T00:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.581606 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.581655 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.581672 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.581700 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.581725 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:20Z","lastTransitionTime":"2025-12-10T00:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.684952 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.685012 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.685029 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.685083 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.685101 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:20Z","lastTransitionTime":"2025-12-10T00:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.788780 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.788853 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.788870 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.788893 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.788917 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:20Z","lastTransitionTime":"2025-12-10T00:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.900556 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.900613 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.900630 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.900660 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:20 crc kubenswrapper[4884]: I1210 00:32:20.900678 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:20Z","lastTransitionTime":"2025-12-10T00:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.004274 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.004341 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.004359 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.004385 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.004403 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:21Z","lastTransitionTime":"2025-12-10T00:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.108058 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.108880 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.108926 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.108955 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.108974 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:21Z","lastTransitionTime":"2025-12-10T00:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.211544 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.211630 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.211648 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.211675 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.211693 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:21Z","lastTransitionTime":"2025-12-10T00:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.217317 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.217382 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.217400 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.217424 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.217469 4884 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-10T00:32:21Z","lastTransitionTime":"2025-12-10T00:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.285748 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"]
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.287536 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 10 00:32:21 crc kubenswrapper[4884]: E1210 00:32:21.287620 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.287731 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.287758 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.287822 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 10 00:32:21 crc kubenswrapper[4884]: E1210 00:32:21.288264 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb"
Dec 10 00:32:21 crc kubenswrapper[4884]: E1210 00:32:21.288525 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.288745 4884 scope.go:117] "RemoveContainer" containerID="1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209"
Dec 10 00:32:21 crc kubenswrapper[4884]: E1210 00:32:21.289006 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-g8w62_openshift-ovn-kubernetes(7022e894-7a34-4a84-8b18-e4440e11e659)\"" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.291625 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.291785 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.292544 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.292631 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.373554 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32090c51-7422-43dc-b70e-64d7c95a09ec-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.373917 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/32090c51-7422-43dc-b70e-64d7c95a09ec-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.373957 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/32090c51-7422-43dc-b70e-64d7c95a09ec-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.373982 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/32090c51-7422-43dc-b70e-64d7c95a09ec-service-ca\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.374036 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/32090c51-7422-43dc-b70e-64d7c95a09ec-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.474653 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32090c51-7422-43dc-b70e-64d7c95a09ec-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.474745 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/32090c51-7422-43dc-b70e-64d7c95a09ec-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.474858 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/32090c51-7422-43dc-b70e-64d7c95a09ec-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.474896 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/32090c51-7422-43dc-b70e-64d7c95a09ec-service-ca\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.474936 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/32090c51-7422-43dc-b70e-64d7c95a09ec-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.474959 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/32090c51-7422-43dc-b70e-64d7c95a09ec-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.475110 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/32090c51-7422-43dc-b70e-64d7c95a09ec-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.476689 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/32090c51-7422-43dc-b70e-64d7c95a09ec-service-ca\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.482930 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32090c51-7422-43dc-b70e-64d7c95a09ec-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.494031 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/32090c51-7422-43dc-b70e-64d7c95a09ec-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-6qc82\" (UID: \"32090c51-7422-43dc-b70e-64d7c95a09ec\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82"
Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.608195 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82" Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.980752 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82" event={"ID":"32090c51-7422-43dc-b70e-64d7c95a09ec","Type":"ContainerStarted","Data":"71b74d02b553a2d1413d771f375d8a8c6ddf3576d1c016a37a2313a23c58d9e2"} Dec 10 00:32:21 crc kubenswrapper[4884]: I1210 00:32:21.980816 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82" event={"ID":"32090c51-7422-43dc-b70e-64d7c95a09ec","Type":"ContainerStarted","Data":"c18a146873375166a9a2b50c11cb5c316b65028ff96a0bd627e8dc512ea99410"} Dec 10 00:32:22 crc kubenswrapper[4884]: I1210 00:32:22.003107 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-6qc82" podStartSLOduration=93.003080275 podStartE2EDuration="1m33.003080275s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:22.000959654 +0000 UTC m=+115.078916771" watchObservedRunningTime="2025-12-10 00:32:22.003080275 +0000 UTC m=+115.081037422" Dec 10 00:32:22 crc kubenswrapper[4884]: I1210 00:32:22.286840 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:22 crc kubenswrapper[4884]: E1210 00:32:22.287161 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:22 crc kubenswrapper[4884]: I1210 00:32:22.986927 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rcj68_0269081f-f135-4e66-91fd-a16277a00355/kube-multus/1.log" Dec 10 00:32:22 crc kubenswrapper[4884]: I1210 00:32:22.987614 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rcj68_0269081f-f135-4e66-91fd-a16277a00355/kube-multus/0.log" Dec 10 00:32:22 crc kubenswrapper[4884]: I1210 00:32:22.987690 4884 generic.go:334] "Generic (PLEG): container finished" podID="0269081f-f135-4e66-91fd-a16277a00355" containerID="90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa" exitCode=1 Dec 10 00:32:22 crc kubenswrapper[4884]: I1210 00:32:22.987738 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rcj68" event={"ID":"0269081f-f135-4e66-91fd-a16277a00355","Type":"ContainerDied","Data":"90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa"} Dec 10 00:32:22 crc kubenswrapper[4884]: I1210 00:32:22.987790 4884 scope.go:117] "RemoveContainer" containerID="bd70aea66b5022974e6cf899df035152bfdb1a3c08a6483896cf981b0bd9d5c2" Dec 10 00:32:22 crc kubenswrapper[4884]: I1210 00:32:22.988347 4884 scope.go:117] "RemoveContainer" containerID="90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa" Dec 10 00:32:22 crc kubenswrapper[4884]: E1210 00:32:22.988657 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-rcj68_openshift-multus(0269081f-f135-4e66-91fd-a16277a00355)\"" pod="openshift-multus/multus-rcj68" podUID="0269081f-f135-4e66-91fd-a16277a00355" Dec 10 00:32:23 crc kubenswrapper[4884]: I1210 00:32:23.286605 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:23 crc kubenswrapper[4884]: I1210 00:32:23.286649 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:23 crc kubenswrapper[4884]: I1210 00:32:23.286627 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:23 crc kubenswrapper[4884]: E1210 00:32:23.286832 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:23 crc kubenswrapper[4884]: E1210 00:32:23.286975 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:23 crc kubenswrapper[4884]: E1210 00:32:23.287244 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:23 crc kubenswrapper[4884]: I1210 00:32:23.994886 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rcj68_0269081f-f135-4e66-91fd-a16277a00355/kube-multus/1.log" Dec 10 00:32:24 crc kubenswrapper[4884]: I1210 00:32:24.287069 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:24 crc kubenswrapper[4884]: E1210 00:32:24.287352 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:25 crc kubenswrapper[4884]: I1210 00:32:25.286992 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:25 crc kubenswrapper[4884]: I1210 00:32:25.287061 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:25 crc kubenswrapper[4884]: I1210 00:32:25.287063 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:25 crc kubenswrapper[4884]: E1210 00:32:25.287187 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:25 crc kubenswrapper[4884]: E1210 00:32:25.287364 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:25 crc kubenswrapper[4884]: E1210 00:32:25.287546 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:26 crc kubenswrapper[4884]: I1210 00:32:26.286743 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:26 crc kubenswrapper[4884]: E1210 00:32:26.286940 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:27 crc kubenswrapper[4884]: E1210 00:32:27.203784 4884 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Dec 10 00:32:27 crc kubenswrapper[4884]: I1210 00:32:27.286493 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:27 crc kubenswrapper[4884]: I1210 00:32:27.286554 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:27 crc kubenswrapper[4884]: I1210 00:32:27.286493 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:27 crc kubenswrapper[4884]: E1210 00:32:27.286690 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:27 crc kubenswrapper[4884]: E1210 00:32:27.288728 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:27 crc kubenswrapper[4884]: E1210 00:32:27.288908 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:27 crc kubenswrapper[4884]: E1210 00:32:27.415668 4884 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 00:32:28 crc kubenswrapper[4884]: I1210 00:32:28.286744 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:28 crc kubenswrapper[4884]: E1210 00:32:28.286899 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:29 crc kubenswrapper[4884]: I1210 00:32:29.286600 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:29 crc kubenswrapper[4884]: I1210 00:32:29.286675 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:29 crc kubenswrapper[4884]: I1210 00:32:29.286600 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:29 crc kubenswrapper[4884]: E1210 00:32:29.286828 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:29 crc kubenswrapper[4884]: E1210 00:32:29.287014 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:29 crc kubenswrapper[4884]: E1210 00:32:29.287131 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:30 crc kubenswrapper[4884]: I1210 00:32:30.287093 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:30 crc kubenswrapper[4884]: E1210 00:32:30.287294 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:31 crc kubenswrapper[4884]: I1210 00:32:31.286388 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:31 crc kubenswrapper[4884]: E1210 00:32:31.286707 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:31 crc kubenswrapper[4884]: I1210 00:32:31.287419 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:31 crc kubenswrapper[4884]: E1210 00:32:31.287612 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:31 crc kubenswrapper[4884]: I1210 00:32:31.288147 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:31 crc kubenswrapper[4884]: E1210 00:32:31.288408 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:32 crc kubenswrapper[4884]: I1210 00:32:32.286070 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:32 crc kubenswrapper[4884]: E1210 00:32:32.286272 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:32 crc kubenswrapper[4884]: E1210 00:32:32.418201 4884 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 00:32:33 crc kubenswrapper[4884]: I1210 00:32:33.287076 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:33 crc kubenswrapper[4884]: I1210 00:32:33.287175 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:33 crc kubenswrapper[4884]: I1210 00:32:33.287280 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:33 crc kubenswrapper[4884]: E1210 00:32:33.287274 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:33 crc kubenswrapper[4884]: E1210 00:32:33.287836 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:33 crc kubenswrapper[4884]: E1210 00:32:33.287957 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:34 crc kubenswrapper[4884]: I1210 00:32:34.286295 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:34 crc kubenswrapper[4884]: E1210 00:32:34.286763 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:34 crc kubenswrapper[4884]: I1210 00:32:34.287057 4884 scope.go:117] "RemoveContainer" containerID="90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa" Dec 10 00:32:35 crc kubenswrapper[4884]: I1210 00:32:35.046909 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rcj68_0269081f-f135-4e66-91fd-a16277a00355/kube-multus/1.log" Dec 10 00:32:35 crc kubenswrapper[4884]: I1210 00:32:35.046993 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rcj68" event={"ID":"0269081f-f135-4e66-91fd-a16277a00355","Type":"ContainerStarted","Data":"70a038a9dee1c81b9417df5e512466f52dbcbee69d176ecad5c1a2f358d0fb4d"} Dec 10 00:32:35 crc kubenswrapper[4884]: I1210 00:32:35.286633 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:35 crc kubenswrapper[4884]: I1210 00:32:35.286641 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:35 crc kubenswrapper[4884]: E1210 00:32:35.286862 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:35 crc kubenswrapper[4884]: I1210 00:32:35.286649 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:35 crc kubenswrapper[4884]: E1210 00:32:35.286948 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:35 crc kubenswrapper[4884]: E1210 00:32:35.287101 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:36 crc kubenswrapper[4884]: I1210 00:32:36.286863 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:36 crc kubenswrapper[4884]: E1210 00:32:36.287601 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:36 crc kubenswrapper[4884]: I1210 00:32:36.287926 4884 scope.go:117] "RemoveContainer" containerID="1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209" Dec 10 00:32:37 crc kubenswrapper[4884]: I1210 00:32:37.057778 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/3.log" Dec 10 00:32:37 crc kubenswrapper[4884]: I1210 00:32:37.060764 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerStarted","Data":"25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5"} Dec 10 00:32:37 crc kubenswrapper[4884]: I1210 00:32:37.061603 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:32:37 crc kubenswrapper[4884]: I1210 00:32:37.110285 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podStartSLOduration=107.11026559 podStartE2EDuration="1m47.11026559s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:37.109759496 +0000 UTC m=+130.187716693" watchObservedRunningTime="2025-12-10 00:32:37.11026559 +0000 UTC m=+130.188222717" Dec 10 00:32:37 crc kubenswrapper[4884]: I1210 00:32:37.286388 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:37 crc kubenswrapper[4884]: I1210 00:32:37.286424 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:37 crc kubenswrapper[4884]: I1210 00:32:37.286551 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:37 crc kubenswrapper[4884]: E1210 00:32:37.291187 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:37 crc kubenswrapper[4884]: E1210 00:32:37.291410 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:37 crc kubenswrapper[4884]: E1210 00:32:37.291680 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:37 crc kubenswrapper[4884]: I1210 00:32:37.307941 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-ndwnl"] Dec 10 00:32:37 crc kubenswrapper[4884]: E1210 00:32:37.419190 4884 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 00:32:38 crc kubenswrapper[4884]: I1210 00:32:38.064872 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:38 crc kubenswrapper[4884]: E1210 00:32:38.065052 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:38 crc kubenswrapper[4884]: I1210 00:32:38.286107 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:38 crc kubenswrapper[4884]: E1210 00:32:38.286319 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:39 crc kubenswrapper[4884]: I1210 00:32:39.286932 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:39 crc kubenswrapper[4884]: I1210 00:32:39.286932 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:39 crc kubenswrapper[4884]: E1210 00:32:39.287178 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:39 crc kubenswrapper[4884]: E1210 00:32:39.287235 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:40 crc kubenswrapper[4884]: I1210 00:32:40.286835 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:40 crc kubenswrapper[4884]: E1210 00:32:40.287332 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:40 crc kubenswrapper[4884]: I1210 00:32:40.286928 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:40 crc kubenswrapper[4884]: E1210 00:32:40.287823 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:41 crc kubenswrapper[4884]: I1210 00:32:41.286274 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:41 crc kubenswrapper[4884]: I1210 00:32:41.286551 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:41 crc kubenswrapper[4884]: E1210 00:32:41.286973 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 10 00:32:41 crc kubenswrapper[4884]: E1210 00:32:41.287236 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 10 00:32:42 crc kubenswrapper[4884]: I1210 00:32:42.286932 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:42 crc kubenswrapper[4884]: I1210 00:32:42.286950 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:42 crc kubenswrapper[4884]: E1210 00:32:42.287320 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 10 00:32:42 crc kubenswrapper[4884]: E1210 00:32:42.287150 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-ndwnl" podUID="ec324800-e820-40c0-8b51-b020075f09eb" Dec 10 00:32:43 crc kubenswrapper[4884]: I1210 00:32:43.286573 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:43 crc kubenswrapper[4884]: I1210 00:32:43.286640 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:43 crc kubenswrapper[4884]: I1210 00:32:43.290117 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 10 00:32:43 crc kubenswrapper[4884]: I1210 00:32:43.290458 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 10 00:32:43 crc kubenswrapper[4884]: I1210 00:32:43.291562 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 10 00:32:43 crc kubenswrapper[4884]: I1210 00:32:43.291723 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 10 00:32:44 crc kubenswrapper[4884]: I1210 00:32:44.286109 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:44 crc kubenswrapper[4884]: I1210 00:32:44.286198 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:32:44 crc kubenswrapper[4884]: I1210 00:32:44.289087 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 10 00:32:44 crc kubenswrapper[4884]: I1210 00:32:44.289140 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.756011 4884 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.806307 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-rqkv4"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.807109 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-v4mzd"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.807870 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-pruner-29422080-pq6kj"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.807982 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.808060 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.809559 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29422080-pq6kj" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.812651 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-4d9rn"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.813087 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.813388 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.813617 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.813671 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.813815 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.814074 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.814100 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qjqjf"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.814226 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.814479 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.814695 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.814945 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.814947 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.815058 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.815107 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.815424 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"serviceca" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.818710 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.818867 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.819080 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"pruner-dockercfg-p7bcw" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.822292 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.823083 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.824795 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-wsf4w"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.825603 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.826141 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.826158 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.826868 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-wsf4w" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.830691 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.831220 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.831784 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.831912 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.836972 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.837370 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.844328 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.858743 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.859918 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.860775 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.860870 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"] Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.861942 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.865912 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.866376 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.867355 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.867474 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.869714 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.874661 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.874792 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.879934 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.890230 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.890572 4884 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.890727 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.890874 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.891017 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.891130 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.893309 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.893659 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.893913 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.894156 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.895421 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.895772 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.895885 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.895966 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.896035 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.897806 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"]
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.898690 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.899068 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900133 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szbbd\" (UniqueName: \"kubernetes.io/projected/415bb537-8c4e-4114-b627-6c76d7cb6738-kube-api-access-szbbd\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900271 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-etcd-serving-ca\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900383 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-image-import-ca\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900515 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4e7d9725-55d4-4230-a690-6f1a647e353d-audit-dir\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900651 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900760 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/25892407-7061-429c-881c-018f0f2e3fff-node-pullsecrets\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900861 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.901013 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gp8j\" (UniqueName: \"kubernetes.io/projected/fd8e23b3-5b64-47bc-903f-4feb12a34389-kube-api-access-9gp8j\") pod \"machine-api-operator-5694c8668f-v4mzd\" (UID: \"fd8e23b3-5b64-47bc-903f-4feb12a34389\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.901114 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.901226 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af455a3c-331c-44ed-9619-1f66379fd774-serving-cert\") pod \"route-controller-manager-6576b87f9c-p6jkw\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.901340 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-trusted-ca-bundle\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.901473 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-config\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.901603 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.901718 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/fd8e23b3-5b64-47bc-903f-4feb12a34389-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-v4mzd\" (UID: \"fd8e23b3-5b64-47bc-903f-4feb12a34389\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.901821 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-audit-policies\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.901936 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be-serviceca\") pod \"image-pruner-29422080-pq6kj\" (UID: \"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be\") " pod="openshift-image-registry/image-pruner-29422080-pq6kj"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.902042 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.902153 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd8e23b3-5b64-47bc-903f-4feb12a34389-config\") pod \"machine-api-operator-5694c8668f-v4mzd\" (UID: \"fd8e23b3-5b64-47bc-903f-4feb12a34389\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.902264 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/25892407-7061-429c-881c-018f0f2e3fff-etcd-client\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.902372 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af455a3c-331c-44ed-9619-1f66379fd774-client-ca\") pod \"route-controller-manager-6576b87f9c-p6jkw\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.902518 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-audit\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.902635 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mw4q\" (UniqueName: \"kubernetes.io/projected/4e7d9725-55d4-4230-a690-6f1a647e353d-kube-api-access-4mw4q\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.902752 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25892407-7061-429c-881c-018f0f2e3fff-serving-cert\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.902867 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.902982 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.903092 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af455a3c-331c-44ed-9619-1f66379fd774-config\") pod \"route-controller-manager-6576b87f9c-p6jkw\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900234 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.903242 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.903554 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/415bb537-8c4e-4114-b627-6c76d7cb6738-serving-cert\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.903581 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blxpf\" (UniqueName: \"kubernetes.io/projected/25892407-7061-429c-881c-018f0f2e3fff-kube-api-access-blxpf\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.903609 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900367 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900415 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.903754 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900424 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900591 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900668 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.903937 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.900997 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.902137 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.902526 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.903762 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-config\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.904185 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mswlq\" (UniqueName: \"kubernetes.io/projected/af455a3c-331c-44ed-9619-1f66379fd774-kube-api-access-mswlq\") pod \"route-controller-manager-6576b87f9c-p6jkw\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.904337 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/25892407-7061-429c-881c-018f0f2e3fff-encryption-config\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.904487 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.904589 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.904684 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rhvg\" (UniqueName: \"kubernetes.io/projected/f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be-kube-api-access-4rhvg\") pod \"image-pruner-29422080-pq6kj\" (UID: \"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be\") " pod="openshift-image-registry/image-pruner-29422080-pq6kj"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.904813 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-client-ca\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.904946 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/fd8e23b3-5b64-47bc-903f-4feb12a34389-images\") pod \"machine-api-operator-5694c8668f-v4mzd\" (UID: \"fd8e23b3-5b64-47bc-903f-4feb12a34389\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.905048 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/25892407-7061-429c-881c-018f0f2e3fff-audit-dir\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.905105 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.905133 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.905719 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.905738 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.905764 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.905838 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.905939 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.905958 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.906102 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.906273 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.906296 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.906411 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.906629 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.906800 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.906961 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.906966 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.907483 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.907602 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.907714 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.907734 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.907863 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.907881 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.908609 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.908998 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.909042 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.909423 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.924572 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-v4mzd"]
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.927786 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9rlgm"]
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.928707 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xkhl5"]
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.929157 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-pt6z4"]
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.929581 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-lx9b2"]
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.929958 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-5hrps"]
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.930485 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.909591 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.931192 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-9rlgm"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.931524 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.959717 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.960219 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk"]
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.960691 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.961282 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"]
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.961873 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.962409 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.962762 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-lx9b2"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.962418 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.909863 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.917350 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.917875 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.922291 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.969160 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.971510 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.922949 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.923001 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.923201 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.923403 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.923473 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.972507 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.972594 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.972690 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.972687 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.972865 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.992057 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.992267 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.992912 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.993147 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.993195 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.993368 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.993587 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.993622 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.993953 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.994340 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.995069 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc"]
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.996173 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc"
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.996292 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"]
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.997341 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"]
Dec 10 00:32:51 crc kubenswrapper[4884]: I1210 00:32:51.997453 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:51.999694 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:51.999940 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29422080-pq6kj"]
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.000685 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.004081 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-rqkv4"]
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.006761 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt"]
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007224 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007266 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gp8j\" (UniqueName: \"kubernetes.io/projected/fd8e23b3-5b64-47bc-903f-4feb12a34389-kube-api-access-9gp8j\") pod \"machine-api-operator-5694c8668f-v4mzd\" (UID: \"fd8e23b3-5b64-47bc-903f-4feb12a34389\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007288 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af455a3c-331c-44ed-9619-1f66379fd774-serving-cert\") pod \"route-controller-manager-6576b87f9c-p6jkw\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007313 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qffq9\" (UniqueName: \"kubernetes.io/projected/c815bdba-e39d-4b49-a85f-28a1e17cfc3c-kube-api-access-qffq9\") pod \"console-operator-58897d9998-wsf4w\" (UID: \"c815bdba-e39d-4b49-a85f-28a1e17cfc3c\") " pod="openshift-console-operator/console-operator-58897d9998-wsf4w"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007335 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-trusted-ca-bundle\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007353 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007373 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6gv5\" (UniqueName: \"kubernetes.io/projected/55764537-dbcc-4a71-8dbd-42cb335e045a-kube-api-access-j6gv5\") pod \"ingress-operator-5b745b69d9-v8xwt\" (UID: \"55764537-dbcc-4a71-8dbd-42cb335e045a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007389 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af7457bc-9a2f-4b9a-a641-58ba17f4d08e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-mptkq\" (UID: \"af7457bc-9a2f-4b9a-a641-58ba17f4d08e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007408 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-config\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007445 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8db32703-3873-413f-b4b4-e1ab1d68abe8-etcd-client\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007464 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/fd8e23b3-5b64-47bc-903f-4feb12a34389-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-v4mzd\" (UID: \"fd8e23b3-5b64-47bc-903f-4feb12a34389\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007480 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/55764537-dbcc-4a71-8dbd-42cb335e045a-trusted-ca\") pod \"ingress-operator-5b745b69d9-v8xwt\" (UID: \"55764537-dbcc-4a71-8dbd-42cb335e045a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007498 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxvmj\" (UniqueName: \"kubernetes.io/projected/af7457bc-9a2f-4b9a-a641-58ba17f4d08e-kube-api-access-qxvmj\") pod \"openshift-apiserver-operator-796bbdcf4f-mptkq\" (UID: \"af7457bc-9a2f-4b9a-a641-58ba17f4d08e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007526 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-audit-policies\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007544 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c815bdba-e39d-4b49-a85f-28a1e17cfc3c-config\") pod \"console-operator-58897d9998-wsf4w\" (UID: \"c815bdba-e39d-4b49-a85f-28a1e17cfc3c\") " pod="openshift-console-operator/console-operator-58897d9998-wsf4w"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007563 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be-serviceca\") pod \"image-pruner-29422080-pq6kj\" (UID: \"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be\") " pod="openshift-image-registry/image-pruner-29422080-pq6kj"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007580 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007599 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd8e23b3-5b64-47bc-903f-4feb12a34389-config\") pod \"machine-api-operator-5694c8668f-v4mzd\" (UID: \"fd8e23b3-5b64-47bc-903f-4feb12a34389\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007614 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/25892407-7061-429c-881c-018f0f2e3fff-etcd-client\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007629 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/8db32703-3873-413f-b4b4-e1ab1d68abe8-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007649 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007668 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af455a3c-331c-44ed-9619-1f66379fd774-client-ca\") pod \"route-controller-manager-6576b87f9c-p6jkw\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007691 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8db32703-3873-413f-b4b4-e1ab1d68abe8-serving-cert\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007710 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-audit\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007726 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mw4q\" (UniqueName: \"kubernetes.io/projected/4e7d9725-55d4-4230-a690-6f1a647e353d-kube-api-access-4mw4q\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007745 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b41f44d-d610-4a87-8ce0-bfb0764ab749-config\") pod \"kube-apiserver-operator-766d6c64bb-mb82t\" (UID: \"3b41f44d-d610-4a87-8ce0-bfb0764ab749\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007761 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8db32703-3873-413f-b4b4-e1ab1d68abe8-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007780 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25892407-7061-429c-881c-018f0f2e3fff-serving-cert\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007796 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/17683ffc-bd60-4096-80d0-5692bcc71422-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007817 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007833 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8db32703-3873-413f-b4b4-e1ab1d68abe8-audit-policies\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007851 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007867 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007884 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3b41f44d-d610-4a87-8ce0-bfb0764ab749-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-mb82t\" (UID: \"3b41f44d-d610-4a87-8ce0-bfb0764ab749\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007916 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af455a3c-331c-44ed-9619-1f66379fd774-config\") pod \"route-controller-manager-6576b87f9c-p6jkw\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007941 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007960 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfhhl\" (UniqueName: \"kubernetes.io/projected/8db32703-3873-413f-b4b4-e1ab1d68abe8-kube-api-access-zfhhl\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.007983 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/415bb537-8c4e-4114-b627-6c76d7cb6738-serving-cert\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008002 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blxpf\" (UniqueName: \"kubernetes.io/projected/25892407-7061-429c-881c-018f0f2e3fff-kube-api-access-blxpf\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008021 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008041 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zx4f4\" (UniqueName: \"kubernetes.io/projected/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-kube-api-access-zx4f4\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008058 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-config\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008074 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17683ffc-bd60-4096-80d0-5692bcc71422-config\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008090 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/17683ffc-bd60-4096-80d0-5692bcc71422-serving-cert\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008109 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mswlq\" (UniqueName: \"kubernetes.io/projected/af455a3c-331c-44ed-9619-1f66379fd774-kube-api-access-mswlq\") pod \"route-controller-manager-6576b87f9c-p6jkw\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008126 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-j4pqg\" (UID: \"a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008145 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/25892407-7061-429c-881c-018f0f2e3fff-encryption-config\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008164 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008181 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/18cfe787-4f63-47c1-a563-d217e266d468-serving-cert\") pod \"openshift-config-operator-7777fb866f-6sfkf\" (UID: \"18cfe787-4f63-47c1-a563-d217e266d468\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008199 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-client-ca\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008215 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/fd8e23b3-5b64-47bc-903f-4feb12a34389-images\") pod \"machine-api-operator-5694c8668f-v4mzd\" (UID: \"fd8e23b3-5b64-47bc-903f-4feb12a34389\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008231 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/25892407-7061-429c-881c-018f0f2e3fff-audit-dir\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008248 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rhvg\" (UniqueName: \"kubernetes.io/projected/f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be-kube-api-access-4rhvg\") pod \"image-pruner-29422080-pq6kj\" (UID: \"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be\") " pod="openshift-image-registry/image-pruner-29422080-pq6kj"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008264 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008283 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008300 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxqfm\" (UniqueName: \"kubernetes.io/projected/18cfe787-4f63-47c1-a563-d217e266d468-kube-api-access-qxqfm\") pod \"openshift-config-operator-7777fb866f-6sfkf\" (UID: \"18cfe787-4f63-47c1-a563-d217e266d468\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008317 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c815bdba-e39d-4b49-a85f-28a1e17cfc3c-trusted-ca\") pod \"console-operator-58897d9998-wsf4w\" (UID: \"c815bdba-e39d-4b49-a85f-28a1e17cfc3c\") " pod="openshift-console-operator/console-operator-58897d9998-wsf4w"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008332 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b41f44d-d610-4a87-8ce0-bfb0764ab749-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-mb82t\" (UID: \"3b41f44d-d610-4a87-8ce0-bfb0764ab749\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008352 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/8db32703-3873-413f-b4b4-e1ab1d68abe8-encryption-config\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008380 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/18cfe787-4f63-47c1-a563-d217e266d468-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6sfkf\" (UID: \"18cfe787-4f63-47c1-a563-d217e266d468\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008398 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008416 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/55764537-dbcc-4a71-8dbd-42cb335e045a-metrics-tls\") pod \"ingress-operator-5b745b69d9-v8xwt\" (UID: \"55764537-dbcc-4a71-8dbd-42cb335e045a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008447 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/55764537-dbcc-4a71-8dbd-42cb335e045a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-v8xwt\" (UID: \"55764537-dbcc-4a71-8dbd-42cb335e045a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008464 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/17683ffc-bd60-4096-80d0-5692bcc71422-service-ca-bundle\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008481 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwvxp\" (UniqueName: \"kubernetes.io/projected/17683ffc-bd60-4096-80d0-5692bcc71422-kube-api-access-zwvxp\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008500 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szbbd\" (UniqueName: \"kubernetes.io/projected/415bb537-8c4e-4114-b627-6c76d7cb6738-kube-api-access-szbbd\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008519 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brj5l\" (UniqueName: \"kubernetes.io/projected/a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100-kube-api-access-brj5l\") pod \"openshift-controller-manager-operator-756b6f6bc6-j4pqg\" (UID: \"a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008537 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c815bdba-e39d-4b49-a85f-28a1e17cfc3c-serving-cert\") pod \"console-operator-58897d9998-wsf4w\" (UID: \"c815bdba-e39d-4b49-a85f-28a1e17cfc3c\") " pod="openshift-console-operator/console-operator-58897d9998-wsf4w"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008554 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af7457bc-9a2f-4b9a-a641-58ba17f4d08e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-mptkq\" (UID: \"af7457bc-9a2f-4b9a-a641-58ba17f4d08e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008572 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8db32703-3873-413f-b4b4-e1ab1d68abe8-audit-dir\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008589 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-image-import-ca\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008607 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4e7d9725-55d4-4230-a690-6f1a647e353d-audit-dir\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008626 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-etcd-serving-ca\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008644 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008661 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-j4pqg\" (UID: \"a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008678 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008702 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/25892407-7061-429c-881c-018f0f2e3fff-node-pullsecrets\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.008778 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/25892407-7061-429c-881c-018f0f2e3fff-node-pullsecrets\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.009298 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-audit\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.010461 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName:
\"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.012210 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-audit-policies\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.012286 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-wsf4w"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.012313 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qjqjf"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.012325 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.012992 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be-serviceca\") pod \"image-pruner-29422080-pq6kj\" (UID: \"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be\") " pod="openshift-image-registry/image-pruner-29422080-pq6kj" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.013647 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af455a3c-331c-44ed-9619-1f66379fd774-config\") pod \"route-controller-manager-6576b87f9c-p6jkw\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.014165 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4e7d9725-55d4-4230-a690-6f1a647e353d-audit-dir\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.014369 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-image-import-ca\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.014660 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-trusted-ca-bundle\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.014689 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fd8e23b3-5b64-47bc-903f-4feb12a34389-config\") pod \"machine-api-operator-5694c8668f-v4mzd\" (UID: \"fd8e23b3-5b64-47bc-903f-4feb12a34389\") " 
pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.015745 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af455a3c-331c-44ed-9619-1f66379fd774-serving-cert\") pod \"route-controller-manager-6576b87f9c-p6jkw\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.015913 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.015945 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-etcd-serving-ca\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.015941 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.016998 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.017337 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/25892407-7061-429c-881c-018f0f2e3fff-audit-dir\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.017423 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/fd8e23b3-5b64-47bc-903f-4feb12a34389-images\") pod \"machine-api-operator-5694c8668f-v4mzd\" (UID: \"fd8e23b3-5b64-47bc-903f-4feb12a34389\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.018836 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.019104 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25892407-7061-429c-881c-018f0f2e3fff-config\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.019138 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-config\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.019140 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.019481 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.019533 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-mbfs5"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.019543 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/25892407-7061-429c-881c-018f0f2e3fff-etcd-client\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.019599 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.020296 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.020396 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-mbfs5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.022583 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.023271 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/415bb537-8c4e-4114-b627-6c76d7cb6738-serving-cert\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.023286 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.023490 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-client-ca\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.023731 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.025502 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-dgdnq"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.025996 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.026547 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-dgdnq" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.030896 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cncdj"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.030955 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.031614 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.032085 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.032403 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.033332 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.033654 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af455a3c-331c-44ed-9619-1f66379fd774-client-ca\") pod \"route-controller-manager-6576b87f9c-p6jkw\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.033694 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.034258 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.034470 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.034926 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.035548 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/25892407-7061-429c-881c-018f0f2e3fff-encryption-config\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.035927 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.039993 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/fd8e23b3-5b64-47bc-903f-4feb12a34389-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-v4mzd\" (UID: \"fd8e23b3-5b64-47bc-903f-4feb12a34389\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.040304 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.040332 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.040999 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-zkw8s"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.041487 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.042188 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.042514 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.042722 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-zkw8s" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.042876 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.043709 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.046333 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.048671 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.050655 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.059325 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xkhl5"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.060607 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-j5mbx"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.061736 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.065606 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25892407-7061-429c-881c-018f0f2e3fff-serving-cert\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.069972 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.071263 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.071560 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.072576 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-d474c"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.073769 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-d474c" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.075107 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.076450 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-546pp"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.076517 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.077701 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.077796 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.078207 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-pt6z4"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.080879 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-lx9b2"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.082160 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-4d9rn"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.083461 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-mbfs5"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.085504 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.086796 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.088106 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9rlgm"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.089445 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.090271 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.091118 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-j5mbx"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.092383 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.093461 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cncdj"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.094557 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.095752 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-gg998"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.097878 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.098004 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-gg998" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.099005 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-kjg6s"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.099915 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-kjg6s" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.100134 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.101260 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5hrps"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.102348 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.103765 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.105217 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.106712 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.107673 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-d474c"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.108794 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-zkw8s"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.109334 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8db32703-3873-413f-b4b4-e1ab1d68abe8-serving-cert\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.109876 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110116 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/dce034f5-3560-4de1-8922-ae8f80ea0fac-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-f8rwc\" (UID: \"dce034f5-3560-4de1-8922-ae8f80ea0fac\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110163 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8db32703-3873-413f-b4b4-e1ab1d68abe8-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110196 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b41f44d-d610-4a87-8ce0-bfb0764ab749-config\") pod \"kube-apiserver-operator-766d6c64bb-mb82t\" (UID: \"3b41f44d-d610-4a87-8ce0-bfb0764ab749\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t" Dec 10 00:32:52 
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110224 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8db32703-3873-413f-b4b4-e1ab1d68abe8-audit-policies\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110251 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/17683ffc-bd60-4096-80d0-5692bcc71422-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110376 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110675 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3b41f44d-d610-4a87-8ce0-bfb0764ab749-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-mb82t\" (UID: \"3b41f44d-d610-4a87-8ce0-bfb0764ab749\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110761 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfhhl\" (UniqueName: \"kubernetes.io/projected/8db32703-3873-413f-b4b4-e1ab1d68abe8-kube-api-access-zfhhl\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110791 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8d422902-7c72-47b8-8c80-c8c7c63d57f7-etcd-client\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110840 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zx4f4\" (UniqueName: \"kubernetes.io/projected/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-kube-api-access-zx4f4\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110871 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/8d422902-7c72-47b8-8c80-c8c7c63d57f7-etcd-ca\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110898 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17683ffc-bd60-4096-80d0-5692bcc71422-config\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110928 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/17683ffc-bd60-4096-80d0-5692bcc71422-serving-cert\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110957 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d422902-7c72-47b8-8c80-c8c7c63d57f7-config\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.110999 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-j4pqg\" (UID: \"a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111029 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/18cfe787-4f63-47c1-a563-d217e266d468-serving-cert\") pod \"openshift-config-operator-7777fb866f-6sfkf\" (UID: \"18cfe787-4f63-47c1-a563-d217e266d468\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111084 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxqfm\" (UniqueName: \"kubernetes.io/projected/18cfe787-4f63-47c1-a563-d217e266d468-kube-api-access-qxqfm\") pod \"openshift-config-operator-7777fb866f-6sfkf\" (UID: \"18cfe787-4f63-47c1-a563-d217e266d468\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111113 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c815bdba-e39d-4b49-a85f-28a1e17cfc3c-trusted-ca\") pod \"console-operator-58897d9998-wsf4w\" (UID: \"c815bdba-e39d-4b49-a85f-28a1e17cfc3c\") " pod="openshift-console-operator/console-operator-58897d9998-wsf4w"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111141 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b41f44d-d610-4a87-8ce0-bfb0764ab749-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-mb82t\" (UID: \"3b41f44d-d610-4a87-8ce0-bfb0764ab749\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111163 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8db32703-3873-413f-b4b4-e1ab1d68abe8-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111172 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d422902-7c72-47b8-8c80-c8c7c63d57f7-etcd-service-ca\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111285 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/18cfe787-4f63-47c1-a563-d217e266d468-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6sfkf\" (UID: \"18cfe787-4f63-47c1-a563-d217e266d468\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111325 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111350 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/8db32703-3873-413f-b4b4-e1ab1d68abe8-encryption-config\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111374 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/55764537-dbcc-4a71-8dbd-42cb335e045a-metrics-tls\") pod \"ingress-operator-5b745b69d9-v8xwt\" (UID: \"55764537-dbcc-4a71-8dbd-42cb335e045a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111398 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/55764537-dbcc-4a71-8dbd-42cb335e045a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-v8xwt\" (UID: \"55764537-dbcc-4a71-8dbd-42cb335e045a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111423 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d422902-7c72-47b8-8c80-c8c7c63d57f7-serving-cert\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111461 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/17683ffc-bd60-4096-80d0-5692bcc71422-service-ca-bundle\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111482 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwvxp\" (UniqueName: \"kubernetes.io/projected/17683ffc-bd60-4096-80d0-5692bcc71422-kube-api-access-zwvxp\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111513 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brj5l\" (UniqueName: \"kubernetes.io/projected/a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100-kube-api-access-brj5l\") pod \"openshift-controller-manager-operator-756b6f6bc6-j4pqg\" (UID: \"a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111539 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c815bdba-e39d-4b49-a85f-28a1e17cfc3c-serving-cert\") pod \"console-operator-58897d9998-wsf4w\" (UID: \"c815bdba-e39d-4b49-a85f-28a1e17cfc3c\") " pod="openshift-console-operator/console-operator-58897d9998-wsf4w"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111559 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af7457bc-9a2f-4b9a-a641-58ba17f4d08e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-mptkq\" (UID: \"af7457bc-9a2f-4b9a-a641-58ba17f4d08e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111582 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8db32703-3873-413f-b4b4-e1ab1d68abe8-audit-dir\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111619 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-j4pqg\" (UID: \"a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111639 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcgjn\" (UniqueName: \"kubernetes.io/projected/8d422902-7c72-47b8-8c80-c8c7c63d57f7-kube-api-access-xcgjn\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111690 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qffq9\" (UniqueName: \"kubernetes.io/projected/c815bdba-e39d-4b49-a85f-28a1e17cfc3c-kube-api-access-qffq9\") pod \"console-operator-58897d9998-wsf4w\" (UID: \"c815bdba-e39d-4b49-a85f-28a1e17cfc3c\") " pod="openshift-console-operator/console-operator-58897d9998-wsf4w"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111717 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af7457bc-9a2f-4b9a-a641-58ba17f4d08e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-mptkq\" (UID: \"af7457bc-9a2f-4b9a-a641-58ba17f4d08e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111742 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6gv5\" (UniqueName: \"kubernetes.io/projected/55764537-dbcc-4a71-8dbd-42cb335e045a-kube-api-access-j6gv5\") pod \"ingress-operator-5b745b69d9-v8xwt\" (UID: \"55764537-dbcc-4a71-8dbd-42cb335e045a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111761 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8db32703-3873-413f-b4b4-e1ab1d68abe8-etcd-client\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111783 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dv8wr\" (UniqueName: \"kubernetes.io/projected/dce034f5-3560-4de1-8922-ae8f80ea0fac-kube-api-access-dv8wr\") pod \"cluster-samples-operator-665b6dd947-f8rwc\" (UID: \"dce034f5-3560-4de1-8922-ae8f80ea0fac\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111809 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/55764537-dbcc-4a71-8dbd-42cb335e045a-trusted-ca\") pod \"ingress-operator-5b745b69d9-v8xwt\" (UID: \"55764537-dbcc-4a71-8dbd-42cb335e045a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111828 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxvmj\" (UniqueName: \"kubernetes.io/projected/af7457bc-9a2f-4b9a-a641-58ba17f4d08e-kube-api-access-qxvmj\") pod \"openshift-apiserver-operator-796bbdcf4f-mptkq\" (UID: \"af7457bc-9a2f-4b9a-a641-58ba17f4d08e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111849 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c815bdba-e39d-4b49-a85f-28a1e17cfc3c-config\") pod \"console-operator-58897d9998-wsf4w\" (UID: \"c815bdba-e39d-4b49-a85f-28a1e17cfc3c\") " pod="openshift-console-operator/console-operator-58897d9998-wsf4w"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111871 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/8db32703-3873-413f-b4b4-e1ab1d68abe8-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.111891 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.112067 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17683ffc-bd60-4096-80d0-5692bcc71422-config\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.112100 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b41f44d-d610-4a87-8ce0-bfb0764ab749-config\") pod \"kube-apiserver-operator-766d6c64bb-mb82t\" (UID: \"3b41f44d-d610-4a87-8ce0-bfb0764ab749\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.112104 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/17683ffc-bd60-4096-80d0-5692bcc71422-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.112679 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.112905 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/18cfe787-4f63-47c1-a563-d217e266d468-available-featuregates\") pod \"openshift-config-operator-7777fb866f-6sfkf\" (UID: \"18cfe787-4f63-47c1-a563-d217e266d468\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.112929 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg"]
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.113071 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/8db32703-3873-413f-b4b4-e1ab1d68abe8-audit-policies\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.113099 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-546pp"]
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.113356 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-j4pqg\" (UID: \"a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg"
Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.114074 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th"
\"kubernetes.io/configmap/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.114545 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/c815bdba-e39d-4b49-a85f-28a1e17cfc3c-trusted-ca\") pod \"console-operator-58897d9998-wsf4w\" (UID: \"c815bdba-e39d-4b49-a85f-28a1e17cfc3c\") " pod="openshift-console-operator/console-operator-58897d9998-wsf4w" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.114649 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c815bdba-e39d-4b49-a85f-28a1e17cfc3c-config\") pod \"console-operator-58897d9998-wsf4w\" (UID: \"c815bdba-e39d-4b49-a85f-28a1e17cfc3c\") " pod="openshift-console-operator/console-operator-58897d9998-wsf4w" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.114711 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/8db32703-3873-413f-b4b4-e1ab1d68abe8-audit-dir\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.114801 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af7457bc-9a2f-4b9a-a641-58ba17f4d08e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-mptkq\" (UID: \"af7457bc-9a2f-4b9a-a641-58ba17f4d08e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.115314 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/8db32703-3873-413f-b4b4-e1ab1d68abe8-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.115530 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/17683ffc-bd60-4096-80d0-5692bcc71422-service-ca-bundle\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.116509 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/55764537-dbcc-4a71-8dbd-42cb335e045a-trusted-ca\") pod \"ingress-operator-5b745b69d9-v8xwt\" (UID: \"55764537-dbcc-4a71-8dbd-42cb335e045a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.116659 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-gg998"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.117383 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.118245 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8db32703-3873-413f-b4b4-e1ab1d68abe8-serving-cert\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.118620 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8db32703-3873-413f-b4b4-e1ab1d68abe8-etcd-client\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.118999 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af7457bc-9a2f-4b9a-a641-58ba17f4d08e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-mptkq\" (UID: \"af7457bc-9a2f-4b9a-a641-58ba17f4d08e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.119063 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/55764537-dbcc-4a71-8dbd-42cb335e045a-metrics-tls\") pod \"ingress-operator-5b745b69d9-v8xwt\" (UID: \"55764537-dbcc-4a71-8dbd-42cb335e045a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.120230 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-j4pqg\" (UID: \"a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.120915 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b41f44d-d610-4a87-8ce0-bfb0764ab749-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-mb82t\" (UID: \"3b41f44d-d610-4a87-8ce0-bfb0764ab749\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.121569 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c815bdba-e39d-4b49-a85f-28a1e17cfc3c-serving-cert\") pod \"console-operator-58897d9998-wsf4w\" (UID: \"c815bdba-e39d-4b49-a85f-28a1e17cfc3c\") " pod="openshift-console-operator/console-operator-58897d9998-wsf4w" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.123355 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-m9g2t"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.124067 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/17683ffc-bd60-4096-80d0-5692bcc71422-serving-cert\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.124491 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-m9g2t" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.124792 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/8db32703-3873-413f-b4b4-e1ab1d68abe8-encryption-config\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.124480 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.124882 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-m9g2t"] Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.126766 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/18cfe787-4f63-47c1-a563-d217e266d468-serving-cert\") pod \"openshift-config-operator-7777fb866f-6sfkf\" (UID: \"18cfe787-4f63-47c1-a563-d217e266d468\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.138325 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.151893 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.171205 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.190748 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.209982 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.212398 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d422902-7c72-47b8-8c80-c8c7c63d57f7-serving-cert\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.212650 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcgjn\" (UniqueName: \"kubernetes.io/projected/8d422902-7c72-47b8-8c80-c8c7c63d57f7-kube-api-access-xcgjn\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.212771 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dv8wr\" (UniqueName: \"kubernetes.io/projected/dce034f5-3560-4de1-8922-ae8f80ea0fac-kube-api-access-dv8wr\") pod \"cluster-samples-operator-665b6dd947-f8rwc\" (UID: 
\"dce034f5-3560-4de1-8922-ae8f80ea0fac\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.212869 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/dce034f5-3560-4de1-8922-ae8f80ea0fac-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-f8rwc\" (UID: \"dce034f5-3560-4de1-8922-ae8f80ea0fac\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.212997 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8d422902-7c72-47b8-8c80-c8c7c63d57f7-etcd-client\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.213099 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/8d422902-7c72-47b8-8c80-c8c7c63d57f7-etcd-ca\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.213200 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d422902-7c72-47b8-8c80-c8c7c63d57f7-config\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.213302 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d422902-7c72-47b8-8c80-c8c7c63d57f7-etcd-service-ca\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.213682 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/8d422902-7c72-47b8-8c80-c8c7c63d57f7-etcd-ca\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.213832 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/8d422902-7c72-47b8-8c80-c8c7c63d57f7-etcd-service-ca\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.214020 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d422902-7c72-47b8-8c80-c8c7c63d57f7-config\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.216146 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/8d422902-7c72-47b8-8c80-c8c7c63d57f7-serving-cert\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.216218 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/8d422902-7c72-47b8-8c80-c8c7c63d57f7-etcd-client\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.230879 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.250910 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.270826 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.290120 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.314779 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.340160 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.352744 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.391163 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.410530 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.431178 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.451087 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.458235 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/dce034f5-3560-4de1-8922-ae8f80ea0fac-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-f8rwc\" (UID: \"dce034f5-3560-4de1-8922-ae8f80ea0fac\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.471081 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.491693 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 
00:32:52.511008 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.530563 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.550916 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.571191 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.590501 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.642131 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mw4q\" (UniqueName: \"kubernetes.io/projected/4e7d9725-55d4-4230-a690-6f1a647e353d-kube-api-access-4mw4q\") pod \"oauth-openshift-558db77b4-qjqjf\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") " pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.659563 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gp8j\" (UniqueName: \"kubernetes.io/projected/fd8e23b3-5b64-47bc-903f-4feb12a34389-kube-api-access-9gp8j\") pod \"machine-api-operator-5694c8668f-v4mzd\" (UID: \"fd8e23b3-5b64-47bc-903f-4feb12a34389\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.677362 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szbbd\" (UniqueName: \"kubernetes.io/projected/415bb537-8c4e-4114-b627-6c76d7cb6738-kube-api-access-szbbd\") pod \"controller-manager-879f6c89f-rqkv4\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.699033 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blxpf\" (UniqueName: \"kubernetes.io/projected/25892407-7061-429c-881c-018f0f2e3fff-kube-api-access-blxpf\") pod \"apiserver-76f77b778f-4d9rn\" (UID: \"25892407-7061-429c-881c-018f0f2e3fff\") " pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.711387 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.715836 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mswlq\" (UniqueName: \"kubernetes.io/projected/af455a3c-331c-44ed-9619-1f66379fd774-kube-api-access-mswlq\") pod \"route-controller-manager-6576b87f9c-p6jkw\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.731393 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.751372 4884 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.771320 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.794141 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.806516 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.811679 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.819129 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rhvg\" (UniqueName: \"kubernetes.io/projected/f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be-kube-api-access-4rhvg\") pod \"image-pruner-29422080-pq6kj\" (UID: \"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be\") " pod="openshift-image-registry/image-pruner-29422080-pq6kj" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.822664 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29422080-pq6kj" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.831600 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.852316 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.867772 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.870650 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.891573 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.898985 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.911074 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.912628 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.932878 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.950852 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.972382 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 10 00:32:52 crc kubenswrapper[4884]: I1210 00:32:52.990421 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.011314 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.031645 4884 request.go:700] Waited for 1.004096943s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.031883 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.032137 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.032213 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:53 crc kubenswrapper[4884]: E1210 00:32:53.032412 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:34:55.032387614 +0000 UTC m=+268.110344751 (durationBeforeRetry 2m2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.033106 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.040130 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.054815 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.076797 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.091855 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.111057 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.116685 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-rqkv4"] Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.122068 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-v4mzd"] Dec 10 00:32:53 crc kubenswrapper[4884]: W1210 00:32:53.129808 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod415bb537_8c4e_4114_b627_6c76d7cb6738.slice/crio-5afbb919a42989cfd1c7efa7684c523d41bbf8e168028e833292fedf03affdb6 WatchSource:0}: Error finding container 5afbb919a42989cfd1c7efa7684c523d41bbf8e168028e833292fedf03affdb6: Status 404 returned error can't find the container with id 5afbb919a42989cfd1c7efa7684c523d41bbf8e168028e833292fedf03affdb6 Dec 10 00:32:53 crc kubenswrapper[4884]: W1210 00:32:53.129973 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd8e23b3_5b64_47bc_903f_4feb12a34389.slice/crio-0af5220e214273051dd2e7e1c0304612ca4c96b04caaf3f3b3c45298d3e0550f WatchSource:0}: Error finding container 0af5220e214273051dd2e7e1c0304612ca4c96b04caaf3f3b3c45298d3e0550f: Status 404 returned error can't find the container with id 
0af5220e214273051dd2e7e1c0304612ca4c96b04caaf3f3b3c45298d3e0550f Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.130759 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.133877 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.133910 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.140280 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.140810 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.163241 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.167380 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qjqjf"] Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.170083 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29422080-pq6kj"] Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.171664 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 10 00:32:53 crc kubenswrapper[4884]: W1210 00:32:53.178943 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf3e16b9b_bb1e_42e5_aa48_cc0f2e6cb8be.slice/crio-375e27a17c6be9c177599518160f4e661ab1264ece35dca5790381b836edd3fb WatchSource:0}: Error finding container 375e27a17c6be9c177599518160f4e661ab1264ece35dca5790381b836edd3fb: Status 404 returned error can't find the container with id 375e27a17c6be9c177599518160f4e661ab1264ece35dca5790381b836edd3fb Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.190617 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.212145 4884 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-marketplace"/"openshift-service-ca.crt" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.214157 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.226762 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.230541 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.237383 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"] Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.250223 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.271380 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.274233 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-4d9rn"] Dec 10 00:32:53 crc kubenswrapper[4884]: W1210 00:32:53.285888 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25892407_7061_429c_881c_018f0f2e3fff.slice/crio-48fdeae4d6f505c842a053f2b566f0d60fe019986757c89527e50b9a45510469 WatchSource:0}: Error finding container 48fdeae4d6f505c842a053f2b566f0d60fe019986757c89527e50b9a45510469: Status 404 returned error can't find the container with id 48fdeae4d6f505c842a053f2b566f0d60fe019986757c89527e50b9a45510469 Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.290723 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.307794 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.310293 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.330545 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.352725 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.370944 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.390448 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.412132 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.431473 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.450584 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 10 00:32:53 crc kubenswrapper[4884]: W1210 00:32:53.465227 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-5d0263508fc25bf10c09f517b6e3b60cfa6b0070bb940f7fbb6c2789562d4d10 WatchSource:0}: Error finding container 5d0263508fc25bf10c09f517b6e3b60cfa6b0070bb940f7fbb6c2789562d4d10: Status 404 returned error can't find the container with id 5d0263508fc25bf10c09f517b6e3b60cfa6b0070bb940f7fbb6c2789562d4d10 Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.470889 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.490632 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.493441 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" event={"ID":"af455a3c-331c-44ed-9619-1f66379fd774","Type":"ContainerStarted","Data":"65cdd817d329b317d7782fb14fd992a4e47fbbfb23a2e8d41848149e6587c017"} Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.493502 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" event={"ID":"af455a3c-331c-44ed-9619-1f66379fd774","Type":"ContainerStarted","Data":"6977bb5063bfad7516359a26ac2c538adabea7e56aa8c75386b0d0935b7f4825"} Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.493689 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.494822 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"5d0263508fc25bf10c09f517b6e3b60cfa6b0070bb940f7fbb6c2789562d4d10"} Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.495510 4884 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-p6jkw container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.495568 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" podUID="af455a3c-331c-44ed-9619-1f66379fd774" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.496019 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" event={"ID":"25892407-7061-429c-881c-018f0f2e3fff","Type":"ContainerStarted","Data":"48fdeae4d6f505c842a053f2b566f0d60fe019986757c89527e50b9a45510469"} Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.510976 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.520788 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" event={"ID":"4e7d9725-55d4-4230-a690-6f1a647e353d","Type":"ContainerStarted","Data":"f4bb082b46f18d958cabd77a3e3b70c199c941e90aab8bc8d57d0a1e861063cc"} Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.526184 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29422080-pq6kj" event={"ID":"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be","Type":"ContainerStarted","Data":"bf90c5e073cb9a54d9007051c32418d7c4d65221e66272f4116fec8bffe1014f"} Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.526240 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29422080-pq6kj" event={"ID":"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be","Type":"ContainerStarted","Data":"375e27a17c6be9c177599518160f4e661ab1264ece35dca5790381b836edd3fb"} Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.528987 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd" event={"ID":"fd8e23b3-5b64-47bc-903f-4feb12a34389","Type":"ContainerStarted","Data":"e0570c7628098f755e10cc41947262271b5a7075163aa975e066eaac84934555"} Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.529051 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd" event={"ID":"fd8e23b3-5b64-47bc-903f-4feb12a34389","Type":"ContainerStarted","Data":"0af5220e214273051dd2e7e1c0304612ca4c96b04caaf3f3b3c45298d3e0550f"} Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.531008 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.533512 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" 
event={"ID":"415bb537-8c4e-4114-b627-6c76d7cb6738","Type":"ContainerStarted","Data":"f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1"} Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.533562 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" event={"ID":"415bb537-8c4e-4114-b627-6c76d7cb6738","Type":"ContainerStarted","Data":"5afbb919a42989cfd1c7efa7684c523d41bbf8e168028e833292fedf03affdb6"} Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.534350 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.535068 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"c8f5c7d03bda3bf47e8655a144ddb3459ed8cc64d7e8609691f5c88536c43496"} Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.536082 4884 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-rqkv4 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.536122 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" podUID="415bb537-8c4e-4114-b627-6c76d7cb6738" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.550631 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.578933 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.593694 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.611011 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.630983 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.663989 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.672036 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.691405 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.710109 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.732029 4884 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.751738 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.771777 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.790718 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.811801 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.830490 4884 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.850770 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.870449 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.889929 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.910511 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.930723 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.971821 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zx4f4\" (UniqueName: \"kubernetes.io/projected/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-kube-api-access-zx4f4\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th" Dec 10 00:32:53 crc kubenswrapper[4884]: I1210 00:32:53.992088 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3b41f44d-d610-4a87-8ce0-bfb0764ab749-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-mb82t\" (UID: \"3b41f44d-d610-4a87-8ce0-bfb0764ab749\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.015886 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfhhl\" (UniqueName: \"kubernetes.io/projected/8db32703-3873-413f-b4b4-e1ab1d68abe8-kube-api-access-zfhhl\") pod \"apiserver-7bbb656c7d-m6pkc\" (UID: \"8db32703-3873-413f-b4b4-e1ab1d68abe8\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.031915 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxqfm\" (UniqueName: 
\"kubernetes.io/projected/18cfe787-4f63-47c1-a563-d217e266d468-kube-api-access-qxqfm\") pod \"openshift-config-operator-7777fb866f-6sfkf\" (UID: \"18cfe787-4f63-47c1-a563-d217e266d468\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.048358 4884 request.go:700] Waited for 1.935163769s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console-operator/serviceaccounts/console-operator/token Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.051519 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-m64th\" (UID: \"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.067088 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qffq9\" (UniqueName: \"kubernetes.io/projected/c815bdba-e39d-4b49-a85f-28a1e17cfc3c-kube-api-access-qffq9\") pod \"console-operator-58897d9998-wsf4w\" (UID: \"c815bdba-e39d-4b49-a85f-28a1e17cfc3c\") " pod="openshift-console-operator/console-operator-58897d9998-wsf4w" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.097757 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwvxp\" (UniqueName: \"kubernetes.io/projected/17683ffc-bd60-4096-80d0-5692bcc71422-kube-api-access-zwvxp\") pod \"authentication-operator-69f744f599-wpwx7\" (UID: \"17683ffc-bd60-4096-80d0-5692bcc71422\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.110724 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brj5l\" (UniqueName: \"kubernetes.io/projected/a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100-kube-api-access-brj5l\") pod \"openshift-controller-manager-operator-756b6f6bc6-j4pqg\" (UID: \"a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.129945 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/55764537-dbcc-4a71-8dbd-42cb335e045a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-v8xwt\" (UID: \"55764537-dbcc-4a71-8dbd-42cb335e045a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.146275 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxvmj\" (UniqueName: \"kubernetes.io/projected/af7457bc-9a2f-4b9a-a641-58ba17f4d08e-kube-api-access-qxvmj\") pod \"openshift-apiserver-operator-796bbdcf4f-mptkq\" (UID: \"af7457bc-9a2f-4b9a-a641-58ba17f4d08e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.163826 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.165899 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6gv5\" (UniqueName: \"kubernetes.io/projected/55764537-dbcc-4a71-8dbd-42cb335e045a-kube-api-access-j6gv5\") pod \"ingress-operator-5b745b69d9-v8xwt\" (UID: \"55764537-dbcc-4a71-8dbd-42cb335e045a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.170702 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.176555 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-wsf4w" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.187273 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.190817 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.194243 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.201685 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.209044 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.212307 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.241974 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.247722 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.252901 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.279825 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcgjn\" (UniqueName: \"kubernetes.io/projected/8d422902-7c72-47b8-8c80-c8c7c63d57f7-kube-api-access-xcgjn\") pod \"etcd-operator-b45778765-xkhl5\" (UID: \"8d422902-7c72-47b8-8c80-c8c7c63d57f7\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.312667 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dv8wr\" (UniqueName: \"kubernetes.io/projected/dce034f5-3560-4de1-8922-ae8f80ea0fac-kube-api-access-dv8wr\") pod \"cluster-samples-operator-665b6dd947-f8rwc\" (UID: \"dce034f5-3560-4de1-8922-ae8f80ea0fac\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.354627 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9g82\" (UniqueName: \"kubernetes.io/projected/d976ab75-284d-4613-adcd-4620ceebf209-kube-api-access-j9g82\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.354672 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5afcb669-190b-4a5b-89cd-a4919e13488d-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zk54f\" (UID: \"5afcb669-190b-4a5b-89cd-a4919e13488d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.354713 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-service-ca\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.354778 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbjsz\" (UniqueName: \"kubernetes.io/projected/7bdeeac5-9323-4934-bf1a-10bb6c8c6f86-kube-api-access-rbjsz\") pod \"control-plane-machine-set-operator-78cbb6b69f-vcrdk\" (UID: \"7bdeeac5-9323-4934-bf1a-10bb6c8c6f86\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.354794 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/d976ab75-284d-4613-adcd-4620ceebf209-stats-auth\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.354823 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0e9b90ca-0276-4a75-9c52-316c83d42c38-machine-approver-tls\") pod \"machine-approver-56656f9798-4ppp4\" (UID: \"0e9b90ca-0276-4a75-9c52-316c83d42c38\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.354853 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.354871 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-oauth-serving-cert\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.354897 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/40fa21aa-487a-46a9-a396-25ba52971640-trusted-ca\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.354928 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-trusted-ca-bundle\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.354952 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-registry-tls\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.354976 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d976ab75-284d-4613-adcd-4620ceebf209-service-ca-bundle\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355050 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbtmf\" (UniqueName: \"kubernetes.io/projected/faf8c014-cf9a-4495-a008-2f56745b6fab-kube-api-access-tbtmf\") pod \"downloads-7954f5f757-lx9b2\" (UID: \"faf8c014-cf9a-4495-a008-2f56745b6fab\") " pod="openshift-console/downloads-7954f5f757-lx9b2"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355094 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4jsd\" (UniqueName: \"kubernetes.io/projected/3a10c3ea-6afa-45d0-b42e-9165a5a1b69c-kube-api-access-m4jsd\") pod \"dns-operator-744455d44c-9rlgm\" (UID: \"3a10c3ea-6afa-45d0-b42e-9165a5a1b69c\") " pod="openshift-dns-operator/dns-operator-744455d44c-9rlgm"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355169 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e9b90ca-0276-4a75-9c52-316c83d42c38-config\") pod \"machine-approver-56656f9798-4ppp4\" (UID: \"0e9b90ca-0276-4a75-9c52-316c83d42c38\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355184 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bthm\" (UniqueName: \"kubernetes.io/projected/0e9b90ca-0276-4a75-9c52-316c83d42c38-kube-api-access-6bthm\") pod \"machine-approver-56656f9798-4ppp4\" (UID: \"0e9b90ca-0276-4a75-9c52-316c83d42c38\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355212 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfm77\" (UniqueName: \"kubernetes.io/projected/270e1ca3-e108-43c1-be6e-cedfbbebd78c-kube-api-access-cfm77\") pod \"migrator-59844c95c7-mbfs5\" (UID: \"270e1ca3-e108-43c1-be6e-cedfbbebd78c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-mbfs5"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355229 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-oauth-config\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355279 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5afcb669-190b-4a5b-89cd-a4919e13488d-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zk54f\" (UID: \"5afcb669-190b-4a5b-89cd-a4919e13488d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355304 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zb8b\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-kube-api-access-6zb8b\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355324 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cd58f93-65b6-4467-b204-85e3e71f3e37-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-thg2l\" (UID: \"2cd58f93-65b6-4467-b204-85e3e71f3e37\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355348 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/40fa21aa-487a-46a9-a396-25ba52971640-registry-certificates\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355390 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0e9b90ca-0276-4a75-9c52-316c83d42c38-auth-proxy-config\") pod \"machine-approver-56656f9798-4ppp4\" (UID: \"0e9b90ca-0276-4a75-9c52-316c83d42c38\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355406 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-config\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355444 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/40fa21aa-487a-46a9-a396-25ba52971640-ca-trust-extracted\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355458 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3a10c3ea-6afa-45d0-b42e-9165a5a1b69c-metrics-tls\") pod \"dns-operator-744455d44c-9rlgm\" (UID: \"3a10c3ea-6afa-45d0-b42e-9165a5a1b69c\") " pod="openshift-dns-operator/dns-operator-744455d44c-9rlgm"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355483 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cd58f93-65b6-4467-b204-85e3e71f3e37-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-thg2l\" (UID: \"2cd58f93-65b6-4467-b204-85e3e71f3e37\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355506 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/d976ab75-284d-4613-adcd-4620ceebf209-default-certificate\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355534 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-bound-sa-token\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355558 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d976ab75-284d-4613-adcd-4620ceebf209-metrics-certs\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355573 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5afcb669-190b-4a5b-89cd-a4919e13488d-config\") pod \"kube-controller-manager-operator-78b949d7b-zk54f\" (UID: \"5afcb669-190b-4a5b-89cd-a4919e13488d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355618 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/40fa21aa-487a-46a9-a396-25ba52971640-installation-pull-secrets\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355640 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dr6c4\" (UniqueName: \"kubernetes.io/projected/2cd58f93-65b6-4467-b204-85e3e71f3e37-kube-api-access-dr6c4\") pod \"kube-storage-version-migrator-operator-b67b599dd-thg2l\" (UID: \"2cd58f93-65b6-4467-b204-85e3e71f3e37\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355661 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzdrj\" (UniqueName: \"kubernetes.io/projected/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-kube-api-access-tzdrj\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355686 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/7bdeeac5-9323-4934-bf1a-10bb6c8c6f86-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-vcrdk\" (UID: \"7bdeeac5-9323-4934-bf1a-10bb6c8c6f86\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.355733 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-serving-cert\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: E1210 00:32:54.358695 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:54.85867202 +0000 UTC m=+147.936629137 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.416041 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.456685 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.456944 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-oauth-serving-cert\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.456978 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcrbb\" (UniqueName: \"kubernetes.io/projected/1a84b97e-b00f-4665-8266-6a1f6905211d-kube-api-access-zcrbb\") pod \"catalog-operator-68c6474976-d5v66\" (UID: \"1a84b97e-b00f-4665-8266-6a1f6905211d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66"
Dec 10 00:32:54 crc kubenswrapper[4884]: E1210 00:32:54.457020 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:54.956994716 +0000 UTC m=+148.034951833 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.458579 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-oauth-serving-cert\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.458720 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lhl2\" (UniqueName: \"kubernetes.io/projected/64efda08-9d0e-40e7-a5a2-766b13a809a8-kube-api-access-8lhl2\") pod \"packageserver-d55dfcdfc-4qztz\" (UID: \"64efda08-9d0e-40e7-a5a2-766b13a809a8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.458781 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/40fa21aa-487a-46a9-a396-25ba52971640-trusted-ca\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.460847 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/40fa21aa-487a-46a9-a396-25ba52971640-trusted-ca\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.460908 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/e20684d7-d620-4c84-8883-7b849f8649bc-srv-cert\") pod \"olm-operator-6b444d44fb-28hlg\" (UID: \"e20684d7-d620-4c84-8883-7b849f8649bc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.460948 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/83389922-a88d-4598-9d07-83d75e94f161-images\") pod \"machine-config-operator-74547568cd-546pp\" (UID: \"83389922-a88d-4598-9d07-83d75e94f161\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.461215 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-trusted-ca-bundle\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462204 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/03818f02-632d-43da-ab6b-792f3628a645-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-zkw8s\" (UID: \"03818f02-632d-43da-ab6b-792f3628a645\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zkw8s"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462270 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-registry-tls\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462306 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzmq9\" (UniqueName: \"kubernetes.io/projected/836a0995-4677-4878-b028-13d90eda9a68-kube-api-access-bzmq9\") pod \"service-ca-operator-777779d784-9r4jw\" (UID: \"836a0995-4677-4878-b028-13d90eda9a68\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462341 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-secret-volume\") pod \"collect-profiles-29422110-4mw26\" (UID: \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462386 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d976ab75-284d-4613-adcd-4620ceebf209-service-ca-bundle\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462412 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1a84b97e-b00f-4665-8266-6a1f6905211d-profile-collector-cert\") pod \"catalog-operator-68c6474976-d5v66\" (UID: \"1a84b97e-b00f-4665-8266-6a1f6905211d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462483 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7eef9efb-6f3c-4262-8d29-3a871fdbd304-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-shqxq\" (UID: \"7eef9efb-6f3c-4262-8d29-3a871fdbd304\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462541 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5823e457-6c7a-416f-bf0c-33ae321434d0-node-bootstrap-token\") pod \"machine-config-server-kjg6s\" (UID: \"5823e457-6c7a-416f-bf0c-33ae321434d0\") " pod="openshift-machine-config-operator/machine-config-server-kjg6s"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462576 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22rdd\" (UniqueName: \"kubernetes.io/projected/7eef9efb-6f3c-4262-8d29-3a871fdbd304-kube-api-access-22rdd\") pod \"machine-config-controller-84d6567774-shqxq\" (UID: \"7eef9efb-6f3c-4262-8d29-3a871fdbd304\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462607 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbtmf\" (UniqueName: \"kubernetes.io/projected/faf8c014-cf9a-4495-a008-2f56745b6fab-kube-api-access-tbtmf\") pod \"downloads-7954f5f757-lx9b2\" (UID: \"faf8c014-cf9a-4495-a008-2f56745b6fab\") " pod="openshift-console/downloads-7954f5f757-lx9b2"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462633 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnk28\" (UniqueName: \"kubernetes.io/projected/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-kube-api-access-lnk28\") pod \"collect-profiles-29422110-4mw26\" (UID: \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462711 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7eef9efb-6f3c-4262-8d29-3a871fdbd304-proxy-tls\") pod \"machine-config-controller-84d6567774-shqxq\" (UID: \"7eef9efb-6f3c-4262-8d29-3a871fdbd304\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462739 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/83389922-a88d-4598-9d07-83d75e94f161-auth-proxy-config\") pod \"machine-config-operator-74547568cd-546pp\" (UID: \"83389922-a88d-4598-9d07-83d75e94f161\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462797 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b-signing-key\") pod \"service-ca-9c57cc56f-j5mbx\" (UID: \"7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462848 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4jsd\" (UniqueName: \"kubernetes.io/projected/3a10c3ea-6afa-45d0-b42e-9165a5a1b69c-kube-api-access-m4jsd\") pod \"dns-operator-744455d44c-9rlgm\" (UID: \"3a10c3ea-6afa-45d0-b42e-9165a5a1b69c\") " pod="openshift-dns-operator/dns-operator-744455d44c-9rlgm"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462881 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/d5738350-616d-4dd8-aba6-fadd8b3271ba-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-k28nx\" (UID: \"d5738350-616d-4dd8-aba6-fadd8b3271ba\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462933 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-registration-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462959 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhncz\" (UniqueName: \"kubernetes.io/projected/5823e457-6c7a-416f-bf0c-33ae321434d0-kube-api-access-nhncz\") pod \"machine-config-server-kjg6s\" (UID: \"5823e457-6c7a-416f-bf0c-33ae321434d0\") " pod="openshift-machine-config-operator/machine-config-server-kjg6s"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.462992 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e9b90ca-0276-4a75-9c52-316c83d42c38-config\") pod \"machine-approver-56656f9798-4ppp4\" (UID: \"0e9b90ca-0276-4a75-9c52-316c83d42c38\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463019 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bthm\" (UniqueName: \"kubernetes.io/projected/0e9b90ca-0276-4a75-9c52-316c83d42c38-kube-api-access-6bthm\") pod \"machine-approver-56656f9798-4ppp4\" (UID: \"0e9b90ca-0276-4a75-9c52-316c83d42c38\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463045 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-cncdj\" (UID: \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\") " pod="openshift-marketplace/marketplace-operator-79b997595-cncdj"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463090 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfm77\" (UniqueName: \"kubernetes.io/projected/270e1ca3-e108-43c1-be6e-cedfbbebd78c-kube-api-access-cfm77\") pod \"migrator-59844c95c7-mbfs5\" (UID: \"270e1ca3-e108-43c1-be6e-cedfbbebd78c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-mbfs5"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463117 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f211a330-2aa3-4f37-b1e0-3d198d18fe14-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dz54v\" (UID: \"f211a330-2aa3-4f37-b1e0-3d198d18fe14\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463146 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-oauth-config\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463171 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f211a330-2aa3-4f37-b1e0-3d198d18fe14-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dz54v\" (UID: \"f211a330-2aa3-4f37-b1e0-3d198d18fe14\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463193 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsnhj\" (UniqueName: \"kubernetes.io/projected/d5738350-616d-4dd8-aba6-fadd8b3271ba-kube-api-access-wsnhj\") pod \"package-server-manager-789f6589d5-k28nx\" (UID: \"d5738350-616d-4dd8-aba6-fadd8b3271ba\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463240 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5afcb669-190b-4a5b-89cd-a4919e13488d-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zk54f\" (UID: \"5afcb669-190b-4a5b-89cd-a4919e13488d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463270 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zb8b\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-kube-api-access-6zb8b\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463312 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cd58f93-65b6-4467-b204-85e3e71f3e37-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-thg2l\" (UID: \"2cd58f93-65b6-4467-b204-85e3e71f3e37\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463358 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/40fa21aa-487a-46a9-a396-25ba52971640-registry-certificates\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463385 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjrlv\" (UniqueName: \"kubernetes.io/projected/c5d91502-3f40-430c-981d-3427daa3dd4e-kube-api-access-bjrlv\") pod \"ingress-canary-m9g2t\" (UID: \"c5d91502-3f40-430c-981d-3427daa3dd4e\") " pod="openshift-ingress-canary/ingress-canary-m9g2t"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.463415 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-config-volume\") pod \"collect-profiles-29422110-4mw26\" (UID: \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.466157 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d976ab75-284d-4613-adcd-4620ceebf209-service-ca-bundle\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.469128 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e9b90ca-0276-4a75-9c52-316c83d42c38-config\") pod \"machine-approver-56656f9798-4ppp4\" (UID: \"0e9b90ca-0276-4a75-9c52-316c83d42c38\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.471225 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/40fa21aa-487a-46a9-a396-25ba52971640-registry-certificates\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474370 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drdmc\" (UniqueName: \"kubernetes.io/projected/7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b-kube-api-access-drdmc\") pod \"service-ca-9c57cc56f-j5mbx\" (UID: \"7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474508 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4zpf\" (UniqueName: \"kubernetes.io/projected/83389922-a88d-4598-9d07-83d75e94f161-kube-api-access-x4zpf\") pod \"machine-config-operator-74547568cd-546pp\" (UID: \"83389922-a88d-4598-9d07-83d75e94f161\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474592 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0e9b90ca-0276-4a75-9c52-316c83d42c38-auth-proxy-config\") pod \"machine-approver-56656f9798-4ppp4\" (UID: \"0e9b90ca-0276-4a75-9c52-316c83d42c38\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474625 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-config\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474669 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-cncdj\" (UID: \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\") " pod="openshift-marketplace/marketplace-operator-79b997595-cncdj"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474695 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/64efda08-9d0e-40e7-a5a2-766b13a809a8-apiservice-cert\") pod \"packageserver-d55dfcdfc-4qztz\" (UID: \"64efda08-9d0e-40e7-a5a2-766b13a809a8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474719 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/64efda08-9d0e-40e7-a5a2-766b13a809a8-webhook-cert\") pod \"packageserver-d55dfcdfc-4qztz\" (UID: \"64efda08-9d0e-40e7-a5a2-766b13a809a8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474748 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw894\" (UniqueName: \"kubernetes.io/projected/1555588f-4670-4be8-8e6f-3270e377d2ce-kube-api-access-xw894\") pod \"dns-default-d474c\" (UID: \"1555588f-4670-4be8-8e6f-3270e377d2ce\") " pod="openshift-dns/dns-default-d474c"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474773 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-977d7\" (UniqueName: \"kubernetes.io/projected/03818f02-632d-43da-ab6b-792f3628a645-kube-api-access-977d7\") pod \"multus-admission-controller-857f4d67dd-zkw8s\" (UID: \"03818f02-632d-43da-ab6b-792f3628a645\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zkw8s"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474800 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-mountpoint-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474842 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/40fa21aa-487a-46a9-a396-25ba52971640-ca-trust-extracted\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474872 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3a10c3ea-6afa-45d0-b42e-9165a5a1b69c-metrics-tls\") pod \"dns-operator-744455d44c-9rlgm\" (UID: \"3a10c3ea-6afa-45d0-b42e-9165a5a1b69c\") " pod="openshift-dns-operator/dns-operator-744455d44c-9rlgm"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474913 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfklc\" (UniqueName: \"kubernetes.io/projected/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-kube-api-access-tfklc\") pod \"marketplace-operator-79b997595-cncdj\" (UID: \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\") " pod="openshift-marketplace/marketplace-operator-79b997595-cncdj"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474942 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-plugins-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.474971 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdnbc\" (UniqueName: \"kubernetes.io/projected/e20684d7-d620-4c84-8883-7b849f8649bc-kube-api-access-qdnbc\") pod \"olm-operator-6b444d44fb-28hlg\" (UID: \"e20684d7-d620-4c84-8883-7b849f8649bc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475038 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cd58f93-65b6-4467-b204-85e3e71f3e37-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-thg2l\" (UID: \"2cd58f93-65b6-4467-b204-85e3e71f3e37\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475106 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/d976ab75-284d-4613-adcd-4620ceebf209-default-certificate\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475137 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1555588f-4670-4be8-8e6f-3270e377d2ce-config-volume\") pod \"dns-default-d474c\" (UID: \"1555588f-4670-4be8-8e6f-3270e377d2ce\") " pod="openshift-dns/dns-default-d474c"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475164 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-bound-sa-token\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475193 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d976ab75-284d-4613-adcd-4620ceebf209-metrics-certs\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475219 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5afcb669-190b-4a5b-89cd-a4919e13488d-config\") pod \"kube-controller-manager-operator-78b949d7b-zk54f\" (UID: \"5afcb669-190b-4a5b-89cd-a4919e13488d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475252 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/40fa21aa-487a-46a9-a396-25ba52971640-installation-pull-secrets\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475279 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dr6c4\" (UniqueName: \"kubernetes.io/projected/2cd58f93-65b6-4467-b204-85e3e71f3e37-kube-api-access-dr6c4\") pod \"kube-storage-version-migrator-operator-b67b599dd-thg2l\" (UID: \"2cd58f93-65b6-4467-b204-85e3e71f3e37\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475307 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzdrj\" (UniqueName: \"kubernetes.io/projected/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-kube-api-access-tzdrj\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475336 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/7bdeeac5-9323-4934-bf1a-10bb6c8c6f86-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-vcrdk\" (UID: \"7bdeeac5-9323-4934-bf1a-10bb6c8c6f86\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475444 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-serving-cert\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475480 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9g82\" (UniqueName: \"kubernetes.io/projected/d976ab75-284d-4613-adcd-4620ceebf209-kube-api-access-j9g82\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475516 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5afcb669-190b-4a5b-89cd-a4919e13488d-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zk54f\" (UID: \"5afcb669-190b-4a5b-89cd-a4919e13488d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475550 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-csi-data-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475577 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-service-ca\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475602 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/836a0995-4677-4878-b028-13d90eda9a68-serving-cert\") pod \"service-ca-operator-777779d784-9r4jw\" (UID: \"836a0995-4677-4878-b028-13d90eda9a68\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475631 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/e20684d7-d620-4c84-8883-7b849f8649bc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-28hlg\" (UID: \"e20684d7-d620-4c84-8883-7b849f8649bc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475662 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbjsz\" (UniqueName: \"kubernetes.io/projected/7bdeeac5-9323-4934-bf1a-10bb6c8c6f86-kube-api-access-rbjsz\") pod \"control-plane-machine-set-operator-78cbb6b69f-vcrdk\" (UID: \"7bdeeac5-9323-4934-bf1a-10bb6c8c6f86\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475688 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/d976ab75-284d-4613-adcd-4620ceebf209-stats-auth\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475715 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f211a330-2aa3-4f37-b1e0-3d198d18fe14-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dz54v\" (UID: \"f211a330-2aa3-4f37-b1e0-3d198d18fe14\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475742 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/64efda08-9d0e-40e7-a5a2-766b13a809a8-tmpfs\") pod \"packageserver-d55dfcdfc-4qztz\" (UID: \"64efda08-9d0e-40e7-a5a2-766b13a809a8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475795 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5823e457-6c7a-416f-bf0c-33ae321434d0-certs\") pod \"machine-config-server-kjg6s\" (UID: \"5823e457-6c7a-416f-bf0c-33ae321434d0\") " pod="openshift-machine-config-operator/machine-config-server-kjg6s"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475824 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c5d91502-3f40-430c-981d-3427daa3dd4e-cert\") pod \"ingress-canary-m9g2t\" (UID: \"c5d91502-3f40-430c-981d-3427daa3dd4e\") " pod="openshift-ingress-canary/ingress-canary-m9g2t"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475849 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/83389922-a88d-4598-9d07-83d75e94f161-proxy-tls\") pod \"machine-config-operator-74547568cd-546pp\" (UID: \"83389922-a88d-4598-9d07-83d75e94f161\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475873 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b-signing-cabundle\") pod \"service-ca-9c57cc56f-j5mbx\" (UID: \"7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475909 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-socket-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475966 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0e9b90ca-0276-4a75-9c52-316c83d42c38-machine-approver-tls\") pod \"machine-approver-56656f9798-4ppp4\" (UID: \"0e9b90ca-0276-4a75-9c52-316c83d42c38\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.475992 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1555588f-4670-4be8-8e6f-3270e377d2ce-metrics-tls\") pod \"dns-default-d474c\" (UID: \"1555588f-4670-4be8-8e6f-3270e377d2ce\") " pod="openshift-dns/dns-default-d474c"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.476017 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1a84b97e-b00f-4665-8266-6a1f6905211d-srv-cert\") pod \"catalog-operator-68c6474976-d5v66\" (UID: \"1a84b97e-b00f-4665-8266-6a1f6905211d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.476047 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/836a0995-4677-4878-b028-13d90eda9a68-config\") pod \"service-ca-operator-777779d784-9r4jw\" (UID: \"836a0995-4677-4878-b028-13d90eda9a68\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.476073 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sxg7\" (UniqueName: \"kubernetes.io/projected/682c346d-10e9-4029-bced-73873eb7229e-kube-api-access-5sxg7\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.476111 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: E1210 00:32:54.476601 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:54.976582716 +0000 UTC m=+148.054539843 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.482384 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0e9b90ca-0276-4a75-9c52-316c83d42c38-auth-proxy-config\") pod \"machine-approver-56656f9798-4ppp4\" (UID: \"0e9b90ca-0276-4a75-9c52-316c83d42c38\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.484383 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5afcb669-190b-4a5b-89cd-a4919e13488d-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-zk54f\" (UID: \"5afcb669-190b-4a5b-89cd-a4919e13488d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.485141 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-config\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.485769 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2cd58f93-65b6-4467-b204-85e3e71f3e37-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-thg2l\" (UID: \"2cd58f93-65b6-4467-b204-85e3e71f3e37\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.486047 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-registry-tls\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.486401 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/40fa21aa-487a-46a9-a396-25ba52971640-ca-trust-extracted\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.487094 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" podStartSLOduration=124.487070764 podStartE2EDuration="2m4.487070764s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:54.448753718 +0000 UTC m=+147.526710835" watchObservedRunningTime="2025-12-10 00:32:54.487070764 +0000 UTC m=+147.565027881"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.487228 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cd58f93-65b6-4467-b204-85e3e71f3e37-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-thg2l\" (UID: \"2cd58f93-65b6-4467-b204-85e3e71f3e37\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.488591 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5afcb669-190b-4a5b-89cd-a4919e13488d-config\") pod \"kube-controller-manager-operator-78b949d7b-zk54f\" (UID: \"5afcb669-190b-4a5b-89cd-a4919e13488d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.490047 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-oauth-config\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.493143 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-service-ca\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.493403 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-trusted-ca-bundle\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.497978 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/7bdeeac5-9323-4934-bf1a-10bb6c8c6f86-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-vcrdk\" (UID: \"7bdeeac5-9323-4934-bf1a-10bb6c8c6f86\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.502949 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-serving-cert\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.510417 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/40fa21aa-487a-46a9-a396-25ba52971640-installation-pull-secrets\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.510487 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/0e9b90ca-0276-4a75-9c52-316c83d42c38-machine-approver-tls\") pod \"machine-approver-56656f9798-4ppp4\" (UID: \"0e9b90ca-0276-4a75-9c52-316c83d42c38\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.510747 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4jsd\" (UniqueName: \"kubernetes.io/projected/3a10c3ea-6afa-45d0-b42e-9165a5a1b69c-kube-api-access-m4jsd\") pod \"dns-operator-744455d44c-9rlgm\" (UID: \"3a10c3ea-6afa-45d0-b42e-9165a5a1b69c\") " pod="openshift-dns-operator/dns-operator-744455d44c-9rlgm"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.517518 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d976ab75-284d-4613-adcd-4620ceebf209-metrics-certs\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.524891 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/d976ab75-284d-4613-adcd-4620ceebf209-default-certificate\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.528312 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbtmf\" (UniqueName: \"kubernetes.io/projected/faf8c014-cf9a-4495-a008-2f56745b6fab-kube-api-access-tbtmf\") pod \"downloads-7954f5f757-lx9b2\" (UID: \"faf8c014-cf9a-4495-a008-2f56745b6fab\") " pod="openshift-console/downloads-7954f5f757-lx9b2"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.534988 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/3a10c3ea-6afa-45d0-b42e-9165a5a1b69c-metrics-tls\") pod \"dns-operator-744455d44c-9rlgm\" (UID: \"3a10c3ea-6afa-45d0-b42e-9165a5a1b69c\") " pod="openshift-dns-operator/dns-operator-744455d44c-9rlgm"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.535808 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/d976ab75-284d-4613-adcd-4620ceebf209-stats-auth\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.548816 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zb8b\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-kube-api-access-6zb8b\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.549018 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"f03b384941463ace8b33158b50f370db00960be34e80814b265b7116a3d15442"}
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.551511 4884 generic.go:334] "Generic (PLEG): container finished" podID="25892407-7061-429c-881c-018f0f2e3fff" containerID="91911bb4d1ef1f8b7034c500fce74bb480236f1a5c844f28853bc53a7afd9459" exitCode=0
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.551561 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" event={"ID":"25892407-7061-429c-881c-018f0f2e3fff","Type":"ContainerDied","Data":"91911bb4d1ef1f8b7034c500fce74bb480236f1a5c844f28853bc53a7afd9459"}
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.564559 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-9rlgm"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.565023 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" event={"ID":"4e7d9725-55d4-4230-a690-6f1a647e353d","Type":"ContainerStarted","Data":"e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38"}
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.565514 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.570067 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd" event={"ID":"fd8e23b3-5b64-47bc-903f-4feb12a34389","Type":"ContainerStarted","Data":"2110d68f8d425c90d48bf86fcd9695a1eec68314f46748c9dc48164b71e0e279"}
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.573831 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"49cf56e9036d7a76e0ffa93e48313e82088f4f6037417449c96e6c3a9b4e7e1f"}
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.573883 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f28a23c75a6b4b6554c725835f395d2a0d31a68fc120ff1ae907f99cb0eae299"}
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.574082 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.575794 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bthm\" (UniqueName: \"kubernetes.io/projected/0e9b90ca-0276-4a75-9c52-316c83d42c38-kube-api-access-6bthm\") pod \"machine-approver-56656f9798-4ppp4\" (UID: \"0e9b90ca-0276-4a75-9c52-316c83d42c38\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.575914 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"8f66a4fdfd0a03ec22a659dedb8c21e869a7545756544c012bd854b062ab7f90"}
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.576569 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume
started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.576969 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-registration-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577002 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-cncdj\" (UID: \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\") " pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577022 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhncz\" (UniqueName: \"kubernetes.io/projected/5823e457-6c7a-416f-bf0c-33ae321434d0-kube-api-access-nhncz\") pod \"machine-config-server-kjg6s\" (UID: \"5823e457-6c7a-416f-bf0c-33ae321434d0\") " pod="openshift-machine-config-operator/machine-config-server-kjg6s" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577050 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f211a330-2aa3-4f37-b1e0-3d198d18fe14-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dz54v\" (UID: \"f211a330-2aa3-4f37-b1e0-3d198d18fe14\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577070 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f211a330-2aa3-4f37-b1e0-3d198d18fe14-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dz54v\" (UID: \"f211a330-2aa3-4f37-b1e0-3d198d18fe14\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577086 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsnhj\" (UniqueName: \"kubernetes.io/projected/d5738350-616d-4dd8-aba6-fadd8b3271ba-kube-api-access-wsnhj\") pod \"package-server-manager-789f6589d5-k28nx\" (UID: \"d5738350-616d-4dd8-aba6-fadd8b3271ba\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577105 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjrlv\" (UniqueName: \"kubernetes.io/projected/c5d91502-3f40-430c-981d-3427daa3dd4e-kube-api-access-bjrlv\") pod \"ingress-canary-m9g2t\" (UID: \"c5d91502-3f40-430c-981d-3427daa3dd4e\") " pod="openshift-ingress-canary/ingress-canary-m9g2t" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577124 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-config-volume\") pod 
\"collect-profiles-29422110-4mw26\" (UID: \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577141 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drdmc\" (UniqueName: \"kubernetes.io/projected/7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b-kube-api-access-drdmc\") pod \"service-ca-9c57cc56f-j5mbx\" (UID: \"7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577161 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4zpf\" (UniqueName: \"kubernetes.io/projected/83389922-a88d-4598-9d07-83d75e94f161-kube-api-access-x4zpf\") pod \"machine-config-operator-74547568cd-546pp\" (UID: \"83389922-a88d-4598-9d07-83d75e94f161\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577238 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-cncdj\" (UID: \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\") " pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577258 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/64efda08-9d0e-40e7-a5a2-766b13a809a8-apiservice-cert\") pod \"packageserver-d55dfcdfc-4qztz\" (UID: \"64efda08-9d0e-40e7-a5a2-766b13a809a8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577276 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/64efda08-9d0e-40e7-a5a2-766b13a809a8-webhook-cert\") pod \"packageserver-d55dfcdfc-4qztz\" (UID: \"64efda08-9d0e-40e7-a5a2-766b13a809a8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577299 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-mountpoint-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577325 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw894\" (UniqueName: \"kubernetes.io/projected/1555588f-4670-4be8-8e6f-3270e377d2ce-kube-api-access-xw894\") pod \"dns-default-d474c\" (UID: \"1555588f-4670-4be8-8e6f-3270e377d2ce\") " pod="openshift-dns/dns-default-d474c" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577346 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-977d7\" (UniqueName: \"kubernetes.io/projected/03818f02-632d-43da-ab6b-792f3628a645-kube-api-access-977d7\") pod \"multus-admission-controller-857f4d67dd-zkw8s\" (UID: \"03818f02-632d-43da-ab6b-792f3628a645\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zkw8s" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 
00:32:54.577362 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-plugins-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577384 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdnbc\" (UniqueName: \"kubernetes.io/projected/e20684d7-d620-4c84-8883-7b849f8649bc-kube-api-access-qdnbc\") pod \"olm-operator-6b444d44fb-28hlg\" (UID: \"e20684d7-d620-4c84-8883-7b849f8649bc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577408 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfklc\" (UniqueName: \"kubernetes.io/projected/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-kube-api-access-tfklc\") pod \"marketplace-operator-79b997595-cncdj\" (UID: \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\") " pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577448 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1555588f-4670-4be8-8e6f-3270e377d2ce-config-volume\") pod \"dns-default-d474c\" (UID: \"1555588f-4670-4be8-8e6f-3270e377d2ce\") " pod="openshift-dns/dns-default-d474c" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577516 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-csi-data-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577536 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/836a0995-4677-4878-b028-13d90eda9a68-serving-cert\") pod \"service-ca-operator-777779d784-9r4jw\" (UID: \"836a0995-4677-4878-b028-13d90eda9a68\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577550 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/e20684d7-d620-4c84-8883-7b849f8649bc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-28hlg\" (UID: \"e20684d7-d620-4c84-8883-7b849f8649bc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577569 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f211a330-2aa3-4f37-b1e0-3d198d18fe14-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dz54v\" (UID: \"f211a330-2aa3-4f37-b1e0-3d198d18fe14\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577593 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/64efda08-9d0e-40e7-a5a2-766b13a809a8-tmpfs\") pod \"packageserver-d55dfcdfc-4qztz\" (UID: 
\"64efda08-9d0e-40e7-a5a2-766b13a809a8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz" Dec 10 00:32:54 crc kubenswrapper[4884]: E1210 00:32:54.577643 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:55.077623405 +0000 UTC m=+148.155580522 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577665 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5823e457-6c7a-416f-bf0c-33ae321434d0-certs\") pod \"machine-config-server-kjg6s\" (UID: \"5823e457-6c7a-416f-bf0c-33ae321434d0\") " pod="openshift-machine-config-operator/machine-config-server-kjg6s" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577685 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c5d91502-3f40-430c-981d-3427daa3dd4e-cert\") pod \"ingress-canary-m9g2t\" (UID: \"c5d91502-3f40-430c-981d-3427daa3dd4e\") " pod="openshift-ingress-canary/ingress-canary-m9g2t" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577702 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/83389922-a88d-4598-9d07-83d75e94f161-proxy-tls\") pod \"machine-config-operator-74547568cd-546pp\" (UID: \"83389922-a88d-4598-9d07-83d75e94f161\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577719 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b-signing-cabundle\") pod \"service-ca-9c57cc56f-j5mbx\" (UID: \"7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577737 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-socket-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577757 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1555588f-4670-4be8-8e6f-3270e377d2ce-metrics-tls\") pod \"dns-default-d474c\" (UID: \"1555588f-4670-4be8-8e6f-3270e377d2ce\") " pod="openshift-dns/dns-default-d474c" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577775 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1a84b97e-b00f-4665-8266-6a1f6905211d-srv-cert\") pod 
\"catalog-operator-68c6474976-d5v66\" (UID: \"1a84b97e-b00f-4665-8266-6a1f6905211d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577791 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/836a0995-4677-4878-b028-13d90eda9a68-config\") pod \"service-ca-operator-777779d784-9r4jw\" (UID: \"836a0995-4677-4878-b028-13d90eda9a68\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577808 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sxg7\" (UniqueName: \"kubernetes.io/projected/682c346d-10e9-4029-bced-73873eb7229e-kube-api-access-5sxg7\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577845 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577872 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcrbb\" (UniqueName: \"kubernetes.io/projected/1a84b97e-b00f-4665-8266-6a1f6905211d-kube-api-access-zcrbb\") pod \"catalog-operator-68c6474976-d5v66\" (UID: \"1a84b97e-b00f-4665-8266-6a1f6905211d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577899 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lhl2\" (UniqueName: \"kubernetes.io/projected/64efda08-9d0e-40e7-a5a2-766b13a809a8-kube-api-access-8lhl2\") pod \"packageserver-d55dfcdfc-4qztz\" (UID: \"64efda08-9d0e-40e7-a5a2-766b13a809a8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577920 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/e20684d7-d620-4c84-8883-7b849f8649bc-srv-cert\") pod \"olm-operator-6b444d44fb-28hlg\" (UID: \"e20684d7-d620-4c84-8883-7b849f8649bc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577939 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/83389922-a88d-4598-9d07-83d75e94f161-images\") pod \"machine-config-operator-74547568cd-546pp\" (UID: \"83389922-a88d-4598-9d07-83d75e94f161\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577955 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/03818f02-632d-43da-ab6b-792f3628a645-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-zkw8s\" (UID: \"03818f02-632d-43da-ab6b-792f3628a645\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zkw8s" Dec 
10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577972 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzmq9\" (UniqueName: \"kubernetes.io/projected/836a0995-4677-4878-b028-13d90eda9a68-kube-api-access-bzmq9\") pod \"service-ca-operator-777779d784-9r4jw\" (UID: \"836a0995-4677-4878-b028-13d90eda9a68\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577978 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/64efda08-9d0e-40e7-a5a2-766b13a809a8-tmpfs\") pod \"packageserver-d55dfcdfc-4qztz\" (UID: \"64efda08-9d0e-40e7-a5a2-766b13a809a8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.577988 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-secret-volume\") pod \"collect-profiles-29422110-4mw26\" (UID: \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.578015 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1a84b97e-b00f-4665-8266-6a1f6905211d-profile-collector-cert\") pod \"catalog-operator-68c6474976-d5v66\" (UID: \"1a84b97e-b00f-4665-8266-6a1f6905211d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.578043 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7eef9efb-6f3c-4262-8d29-3a871fdbd304-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-shqxq\" (UID: \"7eef9efb-6f3c-4262-8d29-3a871fdbd304\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.578063 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5823e457-6c7a-416f-bf0c-33ae321434d0-node-bootstrap-token\") pod \"machine-config-server-kjg6s\" (UID: \"5823e457-6c7a-416f-bf0c-33ae321434d0\") " pod="openshift-machine-config-operator/machine-config-server-kjg6s" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.578079 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22rdd\" (UniqueName: \"kubernetes.io/projected/7eef9efb-6f3c-4262-8d29-3a871fdbd304-kube-api-access-22rdd\") pod \"machine-config-controller-84d6567774-shqxq\" (UID: \"7eef9efb-6f3c-4262-8d29-3a871fdbd304\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.578097 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnk28\" (UniqueName: \"kubernetes.io/projected/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-kube-api-access-lnk28\") pod \"collect-profiles-29422110-4mw26\" (UID: \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.578114 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7eef9efb-6f3c-4262-8d29-3a871fdbd304-proxy-tls\") pod \"machine-config-controller-84d6567774-shqxq\" (UID: \"7eef9efb-6f3c-4262-8d29-3a871fdbd304\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.578129 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/83389922-a88d-4598-9d07-83d75e94f161-auth-proxy-config\") pod \"machine-config-operator-74547568cd-546pp\" (UID: \"83389922-a88d-4598-9d07-83d75e94f161\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.578148 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b-signing-key\") pod \"service-ca-9c57cc56f-j5mbx\" (UID: \"7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.578168 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/d5738350-616d-4dd8-aba6-fadd8b3271ba-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-k28nx\" (UID: \"d5738350-616d-4dd8-aba6-fadd8b3271ba\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.581373 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-secret-volume\") pod \"collect-profiles-29422110-4mw26\" (UID: \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.582401 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-registration-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.586191 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f211a330-2aa3-4f37-b1e0-3d198d18fe14-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dz54v\" (UID: \"f211a330-2aa3-4f37-b1e0-3d198d18fe14\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.587424 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-config-volume\") pod \"collect-profiles-29422110-4mw26\" (UID: \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.588327 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-cncdj\" (UID: \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\") " pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.588388 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-cncdj\" (UID: \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\") " pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" Dec 10 00:32:54 crc kubenswrapper[4884]: E1210 00:32:54.588828 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:55.088810611 +0000 UTC m=+148.166767728 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.590761 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-plugins-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.595767 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/d5738350-616d-4dd8-aba6-fadd8b3271ba-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-k28nx\" (UID: \"d5738350-616d-4dd8-aba6-fadd8b3271ba\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.596234 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-mountpoint-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.596692 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-csi-data-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.597126 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/682c346d-10e9-4029-bced-73873eb7229e-socket-dir\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.597587 4884 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.597799 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b-signing-cabundle\") pod \"service-ca-9c57cc56f-j5mbx\" (UID: \"7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.598468 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/836a0995-4677-4878-b028-13d90eda9a68-config\") pod \"service-ca-operator-777779d784-9r4jw\" (UID: \"836a0995-4677-4878-b028-13d90eda9a68\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.599586 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfm77\" (UniqueName: \"kubernetes.io/projected/270e1ca3-e108-43c1-be6e-cedfbbebd78c-kube-api-access-cfm77\") pod \"migrator-59844c95c7-mbfs5\" (UID: \"270e1ca3-e108-43c1-be6e-cedfbbebd78c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-mbfs5" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.599935 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1555588f-4670-4be8-8e6f-3270e377d2ce-config-volume\") pod \"dns-default-d474c\" (UID: \"1555588f-4670-4be8-8e6f-3270e377d2ce\") " pod="openshift-dns/dns-default-d474c" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.598509 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/83389922-a88d-4598-9d07-83d75e94f161-images\") pod \"machine-config-operator-74547568cd-546pp\" (UID: \"83389922-a88d-4598-9d07-83d75e94f161\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.603296 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.603382 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.603982 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-lx9b2" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.604683 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/836a0995-4677-4878-b028-13d90eda9a68-serving-cert\") pod \"service-ca-operator-777779d784-9r4jw\" (UID: \"836a0995-4677-4878-b028-13d90eda9a68\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.605352 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/83389922-a88d-4598-9d07-83d75e94f161-auth-proxy-config\") pod \"machine-config-operator-74547568cd-546pp\" (UID: \"83389922-a88d-4598-9d07-83d75e94f161\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.606229 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.607724 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7eef9efb-6f3c-4262-8d29-3a871fdbd304-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-shqxq\" (UID: \"7eef9efb-6f3c-4262-8d29-3a871fdbd304\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.608264 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/e20684d7-d620-4c84-8883-7b849f8649bc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-28hlg\" (UID: \"e20684d7-d620-4c84-8883-7b849f8649bc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.609025 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/83389922-a88d-4598-9d07-83d75e94f161-proxy-tls\") pod \"machine-config-operator-74547568cd-546pp\" (UID: \"83389922-a88d-4598-9d07-83d75e94f161\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.609889 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/e20684d7-d620-4c84-8883-7b849f8649bc-srv-cert\") pod \"olm-operator-6b444d44fb-28hlg\" (UID: \"e20684d7-d620-4c84-8883-7b849f8649bc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.610307 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/64efda08-9d0e-40e7-a5a2-766b13a809a8-apiservice-cert\") pod \"packageserver-d55dfcdfc-4qztz\" (UID: \"64efda08-9d0e-40e7-a5a2-766b13a809a8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.610600 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.615867 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7eef9efb-6f3c-4262-8d29-3a871fdbd304-proxy-tls\") pod \"machine-config-controller-84d6567774-shqxq\" (UID: \"7eef9efb-6f3c-4262-8d29-3a871fdbd304\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.621845 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.631304 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1a84b97e-b00f-4665-8266-6a1f6905211d-srv-cert\") pod \"catalog-operator-68c6474976-d5v66\" (UID: \"1a84b97e-b00f-4665-8266-6a1f6905211d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.635768 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5823e457-6c7a-416f-bf0c-33ae321434d0-certs\") pod \"machine-config-server-kjg6s\" (UID: \"5823e457-6c7a-416f-bf0c-33ae321434d0\") " pod="openshift-machine-config-operator/machine-config-server-kjg6s" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.635907 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-mbfs5" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.637130 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/03818f02-632d-43da-ab6b-792f3628a645-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-zkw8s\" (UID: \"03818f02-632d-43da-ab6b-792f3628a645\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zkw8s" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.637267 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b-signing-key\") pod \"service-ca-9c57cc56f-j5mbx\" (UID: \"7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.637318 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/64efda08-9d0e-40e7-a5a2-766b13a809a8-webhook-cert\") pod \"packageserver-d55dfcdfc-4qztz\" (UID: \"64efda08-9d0e-40e7-a5a2-766b13a809a8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.637388 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c5d91502-3f40-430c-981d-3427daa3dd4e-cert\") pod \"ingress-canary-m9g2t\" (UID: \"c5d91502-3f40-430c-981d-3427daa3dd4e\") " pod="openshift-ingress-canary/ingress-canary-m9g2t" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.637420 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1555588f-4670-4be8-8e6f-3270e377d2ce-metrics-tls\") pod 
\"dns-default-d474c\" (UID: \"1555588f-4670-4be8-8e6f-3270e377d2ce\") " pod="openshift-dns/dns-default-d474c" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.647686 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9g82\" (UniqueName: \"kubernetes.io/projected/d976ab75-284d-4613-adcd-4620ceebf209-kube-api-access-j9g82\") pod \"router-default-5444994796-dgdnq\" (UID: \"d976ab75-284d-4613-adcd-4620ceebf209\") " pod="openshift-ingress/router-default-5444994796-dgdnq" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.647810 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-dgdnq" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.649199 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1a84b97e-b00f-4665-8266-6a1f6905211d-profile-collector-cert\") pod \"catalog-operator-68c6474976-d5v66\" (UID: \"1a84b97e-b00f-4665-8266-6a1f6905211d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.653906 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5823e457-6c7a-416f-bf0c-33ae321434d0-node-bootstrap-token\") pod \"machine-config-server-kjg6s\" (UID: \"5823e457-6c7a-416f-bf0c-33ae321434d0\") " pod="openshift-machine-config-operator/machine-config-server-kjg6s" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.659261 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f211a330-2aa3-4f37-b1e0-3d198d18fe14-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dz54v\" (UID: \"f211a330-2aa3-4f37-b1e0-3d198d18fe14\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.666419 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5afcb669-190b-4a5b-89cd-a4919e13488d-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-zk54f\" (UID: \"5afcb669-190b-4a5b-89cd-a4919e13488d\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.696098 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-bound-sa-token\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.717792 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbjsz\" (UniqueName: \"kubernetes.io/projected/7bdeeac5-9323-4934-bf1a-10bb6c8c6f86-kube-api-access-rbjsz\") pod \"control-plane-machine-set-operator-78cbb6b69f-vcrdk\" (UID: \"7bdeeac5-9323-4934-bf1a-10bb6c8c6f86\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.719531 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dr6c4\" (UniqueName: 
\"kubernetes.io/projected/2cd58f93-65b6-4467-b204-85e3e71f3e37-kube-api-access-dr6c4\") pod \"kube-storage-version-migrator-operator-b67b599dd-thg2l\" (UID: \"2cd58f93-65b6-4467-b204-85e3e71f3e37\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l" Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.720195 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:32:54 crc kubenswrapper[4884]: E1210 00:32:54.720949 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:55.220926444 +0000 UTC m=+148.298883561 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.721179 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:32:54 crc kubenswrapper[4884]: E1210 00:32:54.734512 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:55.234496464 +0000 UTC m=+148.312453581 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.749448 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-wsf4w"]
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.761185 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzdrj\" (UniqueName: \"kubernetes.io/projected/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-kube-api-access-tzdrj\") pod \"console-f9d7485db-5hrps\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.775105 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f211a330-2aa3-4f37-b1e0-3d198d18fe14-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-dz54v\" (UID: \"f211a330-2aa3-4f37-b1e0-3d198d18fe14\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.788105 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsnhj\" (UniqueName: \"kubernetes.io/projected/d5738350-616d-4dd8-aba6-fadd8b3271ba-kube-api-access-wsnhj\") pod \"package-server-manager-789f6589d5-k28nx\" (UID: \"d5738350-616d-4dd8-aba6-fadd8b3271ba\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.800511 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjrlv\" (UniqueName: \"kubernetes.io/projected/c5d91502-3f40-430c-981d-3427daa3dd4e-kube-api-access-bjrlv\") pod \"ingress-canary-m9g2t\" (UID: \"c5d91502-3f40-430c-981d-3427daa3dd4e\") " pod="openshift-ingress-canary/ingress-canary-m9g2t"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.807955 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drdmc\" (UniqueName: \"kubernetes.io/projected/7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b-kube-api-access-drdmc\") pod \"service-ca-9c57cc56f-j5mbx\" (UID: \"7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b\") " pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.823900 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:54 crc kubenswrapper[4884]: E1210 00:32:54.824498 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:55.32447346 +0000 UTC m=+148.402430587 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.825023 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4zpf\" (UniqueName: \"kubernetes.io/projected/83389922-a88d-4598-9d07-83d75e94f161-kube-api-access-x4zpf\") pod \"machine-config-operator-74547568cd-546pp\" (UID: \"83389922-a88d-4598-9d07-83d75e94f161\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.843072 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-m9g2t"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.855956 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sxg7\" (UniqueName: \"kubernetes.io/projected/682c346d-10e9-4029-bced-73873eb7229e-kube-api-access-5sxg7\") pod \"csi-hostpathplugin-gg998\" (UID: \"682c346d-10e9-4029-bced-73873eb7229e\") " pod="hostpath-provisioner/csi-hostpathplugin-gg998"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.857739 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5hrps"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.870606 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.873790 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcrbb\" (UniqueName: \"kubernetes.io/projected/1a84b97e-b00f-4665-8266-6a1f6905211d-kube-api-access-zcrbb\") pod \"catalog-operator-68c6474976-d5v66\" (UID: \"1a84b97e-b00f-4665-8266-6a1f6905211d\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.889466 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lhl2\" (UniqueName: \"kubernetes.io/projected/64efda08-9d0e-40e7-a5a2-766b13a809a8-kube-api-access-8lhl2\") pod \"packageserver-d55dfcdfc-4qztz\" (UID: \"64efda08-9d0e-40e7-a5a2-766b13a809a8\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.902487 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.905395 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfklc\" (UniqueName: \"kubernetes.io/projected/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-kube-api-access-tfklc\") pod \"marketplace-operator-79b997595-cncdj\" (UID: \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\") " pod="openshift-marketplace/marketplace-operator-79b997595-cncdj"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.926462 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:54 crc kubenswrapper[4884]: E1210 00:32:54.926974 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:55.426957117 +0000 UTC m=+148.504914244 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.929658 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.949916 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdnbc\" (UniqueName: \"kubernetes.io/projected/e20684d7-d620-4c84-8883-7b849f8649bc-kube-api-access-qdnbc\") pod \"olm-operator-6b444d44fb-28hlg\" (UID: \"e20684d7-d620-4c84-8883-7b849f8649bc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.953820 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.955132 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xw894\" (UniqueName: \"kubernetes.io/projected/1555588f-4670-4be8-8e6f-3270e377d2ce-kube-api-access-xw894\") pod \"dns-default-d474c\" (UID: \"1555588f-4670-4be8-8e6f-3270e377d2ce\") " pod="openshift-dns/dns-default-d474c"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.964600 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.971478 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.976980 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22rdd\" (UniqueName: \"kubernetes.io/projected/7eef9efb-6f3c-4262-8d29-3a871fdbd304-kube-api-access-22rdd\") pod \"machine-config-controller-84d6567774-shqxq\" (UID: \"7eef9efb-6f3c-4262-8d29-3a871fdbd304\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.979339 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.986502 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx"
Dec 10 00:32:54 crc kubenswrapper[4884]: I1210 00:32:54.992550 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:54.999739 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzmq9\" (UniqueName: \"kubernetes.io/projected/836a0995-4677-4878-b028-13d90eda9a68-kube-api-access-bzmq9\") pod \"service-ca-operator-777779d784-9r4jw\" (UID: \"836a0995-4677-4878-b028-13d90eda9a68\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.007728 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.020678 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhncz\" (UniqueName: \"kubernetes.io/projected/5823e457-6c7a-416f-bf0c-33ae321434d0-kube-api-access-nhncz\") pod \"machine-config-server-kjg6s\" (UID: \"5823e457-6c7a-416f-bf0c-33ae321434d0\") " pod="openshift-machine-config-operator/machine-config-server-kjg6s"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.027803 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:55 crc kubenswrapper[4884]: E1210 00:32:55.028383 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:55.528365576 +0000 UTC m=+148.606322693 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.031025 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnk28\" (UniqueName: \"kubernetes.io/projected/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-kube-api-access-lnk28\") pod \"collect-profiles-29422110-4mw26\" (UID: \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.053745 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.057543 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.072279 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-977d7\" (UniqueName: \"kubernetes.io/projected/03818f02-632d-43da-ab6b-792f3628a645-kube-api-access-977d7\") pod \"multus-admission-controller-857f4d67dd-zkw8s\" (UID: \"03818f02-632d-43da-ab6b-792f3628a645\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-zkw8s"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.084348 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.084902 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-d474c"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.089968 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.119823 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-gg998"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.128456 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-kjg6s"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.129365 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:55 crc kubenswrapper[4884]: E1210 00:32:55.130206 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:55.630193465 +0000 UTC m=+148.708150582 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.231214 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:55 crc kubenswrapper[4884]: E1210 00:32:55.231334 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:55.731312116 +0000 UTC m=+148.809269233 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.231457 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:55 crc kubenswrapper[4884]: E1210 00:32:55.231760 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:55.731751949 +0000 UTC m=+148.809709066 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.299497 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-zkw8s"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.335547 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:55 crc kubenswrapper[4884]: E1210 00:32:55.338547 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:55.838512889 +0000 UTC m=+148.916470006 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.392664 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt"]
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.393070 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-wpwx7"]
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.436487 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:55 crc kubenswrapper[4884]: E1210 00:32:55.436850 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:55.936834196 +0000 UTC m=+149.014791313 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.538252 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:55 crc kubenswrapper[4884]: E1210 00:32:55.538788 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:56.038763818 +0000 UTC m=+149.116720965 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.629632 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" event={"ID":"25892407-7061-429c-881c-018f0f2e3fff","Type":"ContainerStarted","Data":"77646eae60a002023b5fbfc034d0494eed2792fa8970169508d9300f49eed366"}
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.630980 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-wsf4w" event={"ID":"c815bdba-e39d-4b49-a85f-28a1e17cfc3c","Type":"ContainerStarted","Data":"16ecd95dfd9ea3f6328a429fe4ade06e359dea63e7477adbc0c1eb63bb827bfe"}
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.631308 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th"]
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.632881 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t"]
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.632909 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-dgdnq" event={"ID":"d976ab75-284d-4613-adcd-4620ceebf209","Type":"ContainerStarted","Data":"d20e49ea976c9b0f538667231a2a796c26c12f37541a7d9d4ebd03615c308f50"}
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.632926 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-dgdnq" event={"ID":"d976ab75-284d-4613-adcd-4620ceebf209","Type":"ContainerStarted","Data":"36df5245667d688f0db788ee83411282d7b4e2e1556989c953dcaab336096307"}
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.634872 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4" event={"ID":"0e9b90ca-0276-4a75-9c52-316c83d42c38","Type":"ContainerStarted","Data":"5493d7c2bbedd805f9b9249aab3a0b229e1fdb8274c44e9788b2562827893ca1"}
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.634943 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4" event={"ID":"0e9b90ca-0276-4a75-9c52-316c83d42c38","Type":"ContainerStarted","Data":"450d03aef46e93e0238fa1e10a8eee68953cb8680cf4fddfbd91a758f293dea7"}
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.637673 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-kjg6s" event={"ID":"5823e457-6c7a-416f-bf0c-33ae321434d0","Type":"ContainerStarted","Data":"835baba135e26cb87eac22033706f61350af24b8cf274803400682a561f23bac"}
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.643071 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:55 crc kubenswrapper[4884]: E1210 00:32:55.643374 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:56.143361941 +0000 UTC m=+149.221319058 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.644474 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg"]
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.646122 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc"]
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.648946 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-dgdnq"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.727337 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"]
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.744515 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:55 crc kubenswrapper[4884]: E1210 00:32:55.745808 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:56.245792518 +0000 UTC m=+149.323749635 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.826480 4884 patch_prober.go:28] interesting pod/router-default-5444994796-dgdnq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 10 00:32:55 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld
Dec 10 00:32:55 crc kubenswrapper[4884]: [+]process-running ok
Dec 10 00:32:55 crc kubenswrapper[4884]: healthz check failed
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.826807 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dgdnq" podUID="d976ab75-284d-4613-adcd-4620ceebf209" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.854840 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:55 crc kubenswrapper[4884]: E1210 00:32:55.859075 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:56.35905977 +0000 UTC m=+149.437016877 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.913036 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" podStartSLOduration=125.913009951 podStartE2EDuration="2m5.913009951s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:55.912463247 +0000 UTC m=+148.990420374" watchObservedRunningTime="2025-12-10 00:32:55.913009951 +0000 UTC m=+148.990967068"
Dec 10 00:32:55 crc kubenswrapper[4884]: I1210 00:32:55.955896 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:55 crc kubenswrapper[4884]: E1210 00:32:55.956274 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:56.456257098 +0000 UTC m=+149.534214215 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.060964 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:56 crc kubenswrapper[4884]: E1210 00:32:56.061503 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:56.561476958 +0000 UTC m=+149.639434075 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.067157 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq"]
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.140777 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-pruner-29422080-pq6kj" podStartSLOduration=127.140749219 podStartE2EDuration="2m7.140749219s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:56.140333119 +0000 UTC m=+149.218290256" watchObservedRunningTime="2025-12-10 00:32:56.140749219 +0000 UTC m=+149.218706326"
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.171815 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:56 crc kubenswrapper[4884]: E1210 00:32:56.172158 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:56.672142892 +0000 UTC m=+149.750100009 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.273818 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:56 crc kubenswrapper[4884]: E1210 00:32:56.274277 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:56.77426616 +0000 UTC m=+149.852223277 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.395075 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:56 crc kubenswrapper[4884]: E1210 00:32:56.395807 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:56.895792822 +0000 UTC m=+149.973749939 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.630845 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:56 crc kubenswrapper[4884]: E1210 00:32:56.647917 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:57.147885216 +0000 UTC m=+150.225842333 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.685725 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf" event={"ID":"18cfe787-4f63-47c1-a563-d217e266d468","Type":"ContainerStarted","Data":"c3fc1b60b1a135239fe58c81bdb85248efa6e6e665e2ffaa170b025b1eaa7b76"}
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.693747 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt" event={"ID":"55764537-dbcc-4a71-8dbd-42cb335e045a","Type":"ContainerStarted","Data":"faab6b73c982c00d43dcc05916ba791553d4fc0f2e906266b033c7698b24d3f0"}
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.693797 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt" event={"ID":"55764537-dbcc-4a71-8dbd-42cb335e045a","Type":"ContainerStarted","Data":"e2294ace8b94aeeb9198b333ec399b2059add8f2b8391bc0e0fbd4a62f24bc6d"}
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.715969 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7" event={"ID":"17683ffc-bd60-4096-80d0-5692bcc71422","Type":"ContainerStarted","Data":"bbc06050f724b9d2e7773344ff9ed4f56cba991cf7f81beba7675b7383f01142"}
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.716035 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7" event={"ID":"17683ffc-bd60-4096-80d0-5692bcc71422","Type":"ContainerStarted","Data":"fa04232837df23fb6c5459c5b057c3e4b7490d3a9cf9a1b9d4c1568d5bbd963a"}
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.727553 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg" event={"ID":"a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100","Type":"ContainerStarted","Data":"db8e6f18130ee52bde9654ed44662770a189e4be3c3a37c5b783bb68216f97e7"}
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.739591 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th" event={"ID":"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0","Type":"ContainerStarted","Data":"4a15523d6dfd1298f3dde9037e4ecd700c346c284fb487681526b1e4c4d61f59"}
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.750830 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-kjg6s" event={"ID":"5823e457-6c7a-416f-bf0c-33ae321434d0","Type":"ContainerStarted","Data":"7965c82ca3b2352876e3494e7c6e8e45f24f83a1c68a6c3bfa613dc926b8ca7a"}
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.753288 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-v4mzd" podStartSLOduration=126.75327627 podStartE2EDuration="2m6.75327627s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:56.750543218 +0000 UTC m=+149.828500345" watchObservedRunningTime="2025-12-10 00:32:56.75327627 +0000 UTC m=+149.831233387"
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.754232 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:56 crc kubenswrapper[4884]: E1210 00:32:56.754530 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:57.254514553 +0000 UTC m=+150.332471670 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.757796 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" event={"ID":"25892407-7061-429c-881c-018f0f2e3fff","Type":"ContainerStarted","Data":"9cf0bbeeed3da63743fb5ec35b855137dbd394a087dd1d7d8644ac11a4a598fd"}
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.765914 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-wsf4w" event={"ID":"c815bdba-e39d-4b49-a85f-28a1e17cfc3c","Type":"ContainerStarted","Data":"8da5dd345dab884769e8e62c5c6d05420b6ef8be268ff9ea712cfda9908926fd"}
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.767363 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-wsf4w"
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.775017 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t" event={"ID":"3b41f44d-d610-4a87-8ce0-bfb0764ab749","Type":"ContainerStarted","Data":"ffeb63197e40f6fc625f5230b6fc7a5b63f0492d6e245fec6ced5e2f085ab5f6"}
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.777651 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-dgdnq" podStartSLOduration=126.777637506 podStartE2EDuration="2m6.777637506s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:56.775412477 +0000 UTC m=+149.853369594" watchObservedRunningTime="2025-12-10 00:32:56.777637506 +0000 UTC m=+149.855594623"
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.785525 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" event={"ID":"8db32703-3873-413f-b4b4-e1ab1d68abe8","Type":"ContainerStarted","Data":"ace1c48bfeeb869e6160ebd9ac35d3e859993c0ebb5e80ce38d65f6567d65c7e"}
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.859962 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:56 crc kubenswrapper[4884]: E1210 00:32:56.860262 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:57.360249876 +0000 UTC m=+150.438206983 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.951555 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" podStartSLOduration=127.951536666 podStartE2EDuration="2m7.951536666s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:56.930123059 +0000 UTC m=+150.008080196" watchObservedRunningTime="2025-12-10 00:32:56.951536666 +0000 UTC m=+150.029493773"
Dec 10 00:32:56 crc kubenswrapper[4884]: I1210 00:32:56.960512 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:56 crc kubenswrapper[4884]: E1210 00:32:56.960747 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:57.460731271 +0000 UTC m=+150.538688388 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.017596 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-wpwx7" podStartSLOduration=128.017580838 podStartE2EDuration="2m8.017580838s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:57.014783644 +0000 UTC m=+150.092740761" watchObservedRunningTime="2025-12-10 00:32:57.017580838 +0000 UTC m=+150.095537955"
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.035419 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-wsf4w" podStartSLOduration=127.03539505 podStartE2EDuration="2m7.03539505s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:57.034994039 +0000 UTC m=+150.112951156" watchObservedRunningTime="2025-12-10 00:32:57.03539505 +0000 UTC m=+150.113352167"
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.061802 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:57 crc kubenswrapper[4884]: E1210 00:32:57.062178 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:57.56216713 +0000 UTC m=+150.640124247 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.073966 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-kjg6s" podStartSLOduration=6.073948622 podStartE2EDuration="6.073948622s" podCreationTimestamp="2025-12-10 00:32:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:57.051483707 +0000 UTC m=+150.129440824" watchObservedRunningTime="2025-12-10 00:32:57.073948622 +0000 UTC m=+150.151905739"
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.075730 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" podStartSLOduration=128.07572602 podStartE2EDuration="2m8.07572602s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:57.073276315 +0000 UTC m=+150.151233462" watchObservedRunningTime="2025-12-10 00:32:57.07572602 +0000 UTC m=+150.153683137"
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.143241 4884 patch_prober.go:28] interesting pod/router-default-5444994796-dgdnq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 10 00:32:57 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]process-running ok
Dec 10 00:32:57 crc kubenswrapper[4884]: healthz check failed
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.143299 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dgdnq" podUID="d976ab75-284d-4613-adcd-4620ceebf209" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.166992 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:57 crc kubenswrapper[4884]: E1210 00:32:57.167196 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:57.667181874 +0000 UTC m=+150.745138991 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.213744 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-wsf4w"
Dec 10 00:32:57 crc kubenswrapper[4884]: E1210 00:32:57.274945 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:57.774928551 +0000 UTC m=+150.852885668 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.275275 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.376323 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:57 crc kubenswrapper[4884]: E1210 00:32:57.376562 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:57.876421232 +0000 UTC m=+150.954378349 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.376584 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:57 crc kubenswrapper[4884]: E1210 00:32:57.376916 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:57.876908915 +0000 UTC m=+150.954866022 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.478257 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:57 crc kubenswrapper[4884]: E1210 00:32:57.479816 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:57.979785513 +0000 UTC m=+151.057742630 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.539620 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5hrps"]
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.567392 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz"]
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.578450 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-mbfs5"]
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.587985 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:57 crc kubenswrapper[4884]: E1210 00:32:57.589183 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:58.089156002 +0000 UTC m=+151.167113119 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.591727 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-lx9b2"]
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.661106 4884 patch_prober.go:28] interesting pod/router-default-5444994796-dgdnq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 10 00:32:57 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]process-running ok
Dec 10 00:32:57 crc kubenswrapper[4884]: healthz check failed
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.661190 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dgdnq" podUID="d976ab75-284d-4613-adcd-4620ceebf209" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.692141 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:57 crc kubenswrapper[4884]: E1210 00:32:57.692528 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:58.192512113 +0000 UTC m=+151.270469230 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.793296 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:57 crc kubenswrapper[4884]: E1210 00:32:57.793648 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:58.293636044 +0000 UTC m=+151.371593161 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.800039 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9rlgm"]
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.800098 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xkhl5"]
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.809816 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc"]
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.812705 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l"]
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.817804 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz" event={"ID":"64efda08-9d0e-40e7-a5a2-766b13a809a8","Type":"ContainerStarted","Data":"84ce2dab228cb49a9366d0b5463065f17e0c433a0ebeaa5e1ec553b4906b546b"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.823566 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq" event={"ID":"af7457bc-9a2f-4b9a-a641-58ba17f4d08e","Type":"ContainerStarted","Data":"9e8a7b64073b73ff03b8f2c1ebc09cfbe0ad6453de25a6484a0380040e3160a7"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.823598 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq" event={"ID":"af7457bc-9a2f-4b9a-a641-58ba17f4d08e","Type":"ContainerStarted","Data":"91038a69ab0c8ae74f1a71bfbb965076ff9ca315a6f207d8a938838df003efe3"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.838049 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f"]
Dec 10 00:32:57 crc kubenswrapper[4884]: W1210 00:32:57.850420 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8d422902_7c72_47b8_8c80_c8c7c63d57f7.slice/crio-ca2bfa2d6458cc53c8edc8875f00f2aebb97cca702e2766d1af2f8c28c5457ef WatchSource:0}: Error finding container ca2bfa2d6458cc53c8edc8875f00f2aebb97cca702e2766d1af2f8c28c5457ef: Status 404 returned error can't find the container with id ca2bfa2d6458cc53c8edc8875f00f2aebb97cca702e2766d1af2f8c28c5457ef
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.852753 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-mbfs5" event={"ID":"270e1ca3-e108-43c1-be6e-cedfbbebd78c","Type":"ContainerStarted","Data":"7e15217ff1c13d02cecea8ca71576001eb011ae6d88f24332914faf08f91d7a1"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.857955 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mptkq" podStartSLOduration=128.857937469 podStartE2EDuration="2m8.857937469s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:57.854602501 +0000 UTC m=+150.932559638" watchObservedRunningTime="2025-12-10 00:32:57.857937469 +0000 UTC m=+150.935894586"
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.872252 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-lx9b2" event={"ID":"faf8c014-cf9a-4495-a008-2f56745b6fab","Type":"ContainerStarted","Data":"0c9521f893a79803e7908a47113ff78079f828c86e72717e5ee62efee5d834b2"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.879021 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4" event={"ID":"0e9b90ca-0276-4a75-9c52-316c83d42c38","Type":"ContainerStarted","Data":"2f663f0c6a8d23cd847726d21593def765387b79e20c88cd34fc1ba20dcd7a78"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.886010 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg" event={"ID":"a7b4c082-4761-4f2b-bb6a-f5e0e5c4f100","Type":"ContainerStarted","Data":"67d398e9042c3491b1b3f6ef73f96efb0321aa7a7a943bad98eebbbad1ac918e"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.896805 4884 generic.go:334] "Generic (PLEG): container finished" podID="18cfe787-4f63-47c1-a563-d217e266d468" containerID="c464026fa1de6157cffa679672ca7c1cfbd3673f6b82a172c08c8e5fc77841af" exitCode=0
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.896917 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf" event={"ID":"18cfe787-4f63-47c1-a563-d217e266d468","Type":"ContainerDied","Data":"c464026fa1de6157cffa679672ca7c1cfbd3673f6b82a172c08c8e5fc77841af"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.897194 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:57 crc kubenswrapper[4884]: E1210 00:32:57.897333 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:58.397313583 +0000 UTC m=+151.475270700 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.897894 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:57 crc kubenswrapper[4884]: E1210 00:32:57.900486 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:58.400465586 +0000 UTC m=+151.478422773 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.901609 4884 generic.go:334] "Generic (PLEG): container finished" podID="8db32703-3873-413f-b4b4-e1ab1d68abe8" containerID="745b36347443855aeb5907adb0f2aa9fba528aa86940b6f93b6e159f4ebd9428" exitCode=0
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.901664 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" event={"ID":"8db32703-3873-413f-b4b4-e1ab1d68abe8","Type":"ContainerDied","Data":"745b36347443855aeb5907adb0f2aa9fba528aa86940b6f93b6e159f4ebd9428"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.911715 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.911980 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-4d9rn"
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.915960 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-4ppp4" podStartSLOduration=128.915944777 podStartE2EDuration="2m8.915944777s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:57.91530405 +0000 UTC m=+150.993261167" watchObservedRunningTime="2025-12-10 00:32:57.915944777 +0000 UTC m=+150.993901894"
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.920692 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5hrps" event={"ID":"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9","Type":"ContainerStarted","Data":"22a6b40860876c379ee679c335da49b2f29bb3d5823932e71f788bbeb1725d37"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.923765 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t" event={"ID":"3b41f44d-d610-4a87-8ce0-bfb0764ab749","Type":"ContainerStarted","Data":"6191948ca236992e905e2e57c02bf2d3afdb8ac4be1230909674f16270d9c0d1"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.933828 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt" event={"ID":"55764537-dbcc-4a71-8dbd-42cb335e045a","Type":"ContainerStarted","Data":"52c00eaf59b572ab4b5c6511ccbacfb54daa6b9858f49bee8d1ef97e89c883ec"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.943971 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th" event={"ID":"13e4d0b1-8d07-4498-b11b-75c6a5d1fbb0","Type":"ContainerStarted","Data":"629d1fc040a3d8973bf31e9dc85799947e9e86fa4234ba184b9dfc70f176a334"}
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.956987 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-j4pqg" podStartSLOduration=127.956966324 podStartE2EDuration="2m7.956966324s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:57.955922187 +0000 UTC m=+151.033879304" watchObservedRunningTime="2025-12-10 00:32:57.956966324 +0000 UTC m=+151.034923441"
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.961024 4884 patch_prober.go:28] interesting pod/apiserver-76f77b778f-4d9rn container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]log ok
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]etcd ok
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]poststarthook/start-apiserver-admission-initializer ok
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]poststarthook/generic-apiserver-start-informers ok
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]poststarthook/max-in-flight-filter ok
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]poststarthook/storage-object-count-tracker-hook ok
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Dec 10 00:32:57 crc kubenswrapper[4884]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Dec 10 00:32:57 crc kubenswrapper[4884]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]poststarthook/project.openshift.io-projectcache ok
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]poststarthook/openshift.io-startinformers ok
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]poststarthook/openshift.io-restmapperupdater ok
Dec 10 00:32:57 crc kubenswrapper[4884]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Dec 10 00:32:57 crc kubenswrapper[4884]: livez check failed
Dec 10 00:32:57 crc kubenswrapper[4884]: I1210 00:32:57.961082 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" podUID="25892407-7061-429c-881c-018f0f2e3fff" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.003886 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:58 crc kubenswrapper[4884]: E1210 00:32:58.009797 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:58.509761644 +0000 UTC m=+151.587718941 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.023756 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-5hrps" podStartSLOduration=128.023732045 podStartE2EDuration="2m8.023732045s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:58.023124459 +0000 UTC m=+151.101081586" watchObservedRunningTime="2025-12-10 00:32:58.023732045 +0000 UTC m=+151.101689162"
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.050105 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26"]
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.050157 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx"]
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.053939 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq"]
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.056225 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66"]
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.069840 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-d474c"]
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.080879 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw"]
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.081976 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v8xwt" podStartSLOduration=128.081962499 podStartE2EDuration="2m8.081962499s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:58.060961882 +0000 UTC m=+151.138918999" watchObservedRunningTime="2025-12-10 00:32:58.081962499 +0000 UTC m=+151.159919616"
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.101412 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v"]
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.107743 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.114421 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk"]
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.114490 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-m9g2t"]
Dec 10 00:32:58 crc kubenswrapper[4884]: E1210 00:32:58.115962 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:58.6159454 +0000 UTC m=+151.693902517 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.155462 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-546pp"]
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.183987 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg"]
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.185696 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-gg998"]
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.190474 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-m64th" podStartSLOduration=128.190452016 podStartE2EDuration="2m8.190452016s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:58.09481161 +0000 UTC m=+151.172768747" watchObservedRunningTime="2025-12-10 00:32:58.190452016 +0000 UTC m=+151.268409133"
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.191812 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cncdj"]
Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.204123 4884 kubelet.go:2428]
"SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-j5mbx"] Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.207113 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mb82t" podStartSLOduration=128.207093737 podStartE2EDuration="2m8.207093737s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:58.136555416 +0000 UTC m=+151.214512553" watchObservedRunningTime="2025-12-10 00:32:58.207093737 +0000 UTC m=+151.285050854" Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.216618 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:32:58 crc kubenswrapper[4884]: E1210 00:32:58.217286 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:58.717267516 +0000 UTC m=+151.795224633 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.220274 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-zkw8s"] Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.321833 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:32:58 crc kubenswrapper[4884]: E1210 00:32:58.323598 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:58.823581125 +0000 UTC m=+151.901538232 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:58 crc kubenswrapper[4884]: W1210 00:32:58.326787 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7a7fb0cf_3aa8_41b2_8c88_330cffb08e8b.slice/crio-f9ca57ab6fd71143496efc8218ce9688809dac831f0dc0c67f2d36359fbc7183 WatchSource:0}: Error finding container f9ca57ab6fd71143496efc8218ce9688809dac831f0dc0c67f2d36359fbc7183: Status 404 returned error can't find the container with id f9ca57ab6fd71143496efc8218ce9688809dac831f0dc0c67f2d36359fbc7183 Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.423089 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:32:58 crc kubenswrapper[4884]: E1210 00:32:58.423830 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:58.923803832 +0000 UTC m=+152.001761019 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.526158 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:32:58 crc kubenswrapper[4884]: E1210 00:32:58.526637 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:59.026620789 +0000 UTC m=+152.104577906 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.627171 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:32:58 crc kubenswrapper[4884]: E1210 00:32:58.627506 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:59.127486633 +0000 UTC m=+152.205443750 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.655493 4884 patch_prober.go:28] interesting pod/router-default-5444994796-dgdnq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 00:32:58 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Dec 10 00:32:58 crc kubenswrapper[4884]: [+]process-running ok Dec 10 00:32:58 crc kubenswrapper[4884]: healthz check failed Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.655548 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dgdnq" podUID="d976ab75-284d-4613-adcd-4620ceebf209" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.729403 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:32:58 crc kubenswrapper[4884]: E1210 00:32:58.730071 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:59.230055782 +0000 UTC m=+152.308012899 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.832795 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:32:58 crc kubenswrapper[4884]: E1210 00:32:58.833701 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:59.33368778 +0000 UTC m=+152.411644897 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:58 crc kubenswrapper[4884]: I1210 00:32:58.935336 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:32:58 crc kubenswrapper[4884]: E1210 00:32:58.935681 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:59.435668814 +0000 UTC m=+152.513625931 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.005378 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx" event={"ID":"d5738350-616d-4dd8-aba6-fadd8b3271ba","Type":"ContainerStarted","Data":"ce43a0ca88e567c182c8f1dc7d657426b9a51503c5bb4a7a1deb4262a91abbef"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.005424 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx" event={"ID":"d5738350-616d-4dd8-aba6-fadd8b3271ba","Type":"ContainerStarted","Data":"27468f4134bd6ba78684d587d5839eb00c66847a234570c4e93edf86ec003923"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.009300 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-zkw8s" event={"ID":"03818f02-632d-43da-ab6b-792f3628a645","Type":"ContainerStarted","Data":"990cf546f35ccf6cef32e30e76489676d01177f90acab857a87f8dd592099307"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.011136 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg" event={"ID":"e20684d7-d620-4c84-8883-7b849f8649bc","Type":"ContainerStarted","Data":"b1a5b655b4c0fa7409638534fe4f90d8105d5c98249d9239d96a986603b78fd3"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.012852 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-d474c" event={"ID":"1555588f-4670-4be8-8e6f-3270e377d2ce","Type":"ContainerStarted","Data":"9c81a8ef4856f88dcc27be373fd310db72b0080dfe99c299998d1a4fe2ce49d8"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.019653 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" event={"ID":"db8164d6-52ea-4ee2-b307-0acc3cbd72a9","Type":"ContainerStarted","Data":"d4216dfbf87c579f8e22c90114ce1fea65117beffbb4e9151d2585b7f8e625f3"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.021043 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9rlgm" event={"ID":"3a10c3ea-6afa-45d0-b42e-9165a5a1b69c","Type":"ContainerStarted","Data":"0877aa46894f55e17456ed664034377d3282e71ffd91013dfdb460c8fdf40fde"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.024240 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz" event={"ID":"64efda08-9d0e-40e7-a5a2-766b13a809a8","Type":"ContainerStarted","Data":"aa18e17224990f9fe3b1380c3efde929adbd9561d1bc3b7276101a8c4cdd843a"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.024739 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz"
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.026627 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp" event={"ID":"83389922-a88d-4598-9d07-83d75e94f161","Type":"ContainerStarted","Data":"e37457620001b16cfd851ce0c66c4eab8f45444ee8ea7e8ca6b17e53e246af25"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.027356 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66" event={"ID":"1a84b97e-b00f-4665-8266-6a1f6905211d","Type":"ContainerStarted","Data":"c6cd86c081952b8388c17078aa4fbedd5ba3ae04c3f40021337b367c8f336efa"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.033331 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f" event={"ID":"5afcb669-190b-4a5b-89cd-a4919e13488d","Type":"ContainerStarted","Data":"b26f70ca5b3bac1dc9a4b33f6b040d8596af5b2e0f8d5d7330f8cda9705c19d6"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.037745 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:59 crc kubenswrapper[4884]: E1210 00:32:59.038991 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:59.538969013 +0000 UTC m=+152.616926130 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.046799 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx" event={"ID":"7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b","Type":"ContainerStarted","Data":"f9ca57ab6fd71143496efc8218ce9688809dac831f0dc0c67f2d36359fbc7183"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.049592 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-gg998" event={"ID":"682c346d-10e9-4029-bced-73873eb7229e","Type":"ContainerStarted","Data":"9ee4545f73b3f2da32f610544597b714deb8aac01b3b6364296eafc281cadacc"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.061691 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz" podStartSLOduration=129.061663065 podStartE2EDuration="2m9.061663065s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:59.060481853 +0000 UTC m=+152.138438980" watchObservedRunningTime="2025-12-10 00:32:59.061663065 +0000 UTC m=+152.139620182"
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.064946 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l" event={"ID":"2cd58f93-65b6-4467-b204-85e3e71f3e37","Type":"ContainerStarted","Data":"ffc7428a49a3a96ea30fdde7ba344605bca7241fe51649f34ca167e671b260bf"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.064995 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l" event={"ID":"2cd58f93-65b6-4467-b204-85e3e71f3e37","Type":"ContainerStarted","Data":"99751b65abb5073868a54740834cf3735a6dd6dd72ce18407da8052c3d161445"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.084016 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc" event={"ID":"dce034f5-3560-4de1-8922-ae8f80ea0fac","Type":"ContainerStarted","Data":"84ccc56cb15b2e12adade62308503d29aa64cd8705d816f46775ceb0988d6994"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.089635 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-thg2l" podStartSLOduration=129.089623296 podStartE2EDuration="2m9.089623296s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:59.087826898 +0000 UTC m=+152.165784015" watchObservedRunningTime="2025-12-10 00:32:59.089623296 +0000 UTC m=+152.167580413"
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.108761 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf" event={"ID":"18cfe787-4f63-47c1-a563-d217e266d468","Type":"ContainerStarted","Data":"ea74c9f87cf650881eb6e6834b49f763b2ded7a7ea0deec01f50a10a00a5e495"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.108847 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf"
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.135760 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf" podStartSLOduration=130.135734438 podStartE2EDuration="2m10.135734438s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:59.131427155 +0000 UTC m=+152.209384302" watchObservedRunningTime="2025-12-10 00:32:59.135734438 +0000 UTC m=+152.213691555"
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.138249 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-m9g2t" event={"ID":"c5d91502-3f40-430c-981d-3427daa3dd4e","Type":"ContainerStarted","Data":"0b72235f178150adb35d647ef477e56281c83324be920c3b3e05a83e396d792b"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.139601 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:59 crc kubenswrapper[4884]: E1210 00:32:59.140090 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:59.640075944 +0000 UTC m=+152.718033051 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.155097 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5hrps" event={"ID":"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9","Type":"ContainerStarted","Data":"4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.156601 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v" event={"ID":"f211a330-2aa3-4f37-b1e0-3d198d18fe14","Type":"ContainerStarted","Data":"804996f6a3daf50f7f4a9e492c7c8f8c96a1cd476872936d6bc7b25ac240c209"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.157374 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-lx9b2" event={"ID":"faf8c014-cf9a-4495-a008-2f56745b6fab","Type":"ContainerStarted","Data":"9443d4efad009d8d649303ee504e255368e7b3a8391ba8b863dae45c6a070f0a"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.158159 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-lx9b2"
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.160196 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw" event={"ID":"836a0995-4677-4878-b028-13d90eda9a68","Type":"ContainerStarted","Data":"b4e431704e72ac30b65705372738ba93a280c1c17504381c962e02e63d0d0ee6"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.162487 4884 patch_prober.go:28] interesting pod/downloads-7954f5f757-lx9b2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body=
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.162686 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-lx9b2" podUID="faf8c014-cf9a-4495-a008-2f56745b6fab" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused"
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.173205 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" event={"ID":"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84","Type":"ContainerStarted","Data":"6b7fba3b510ce251220c117afba98a7a7be56e0c97f00a7805caf5d816fefb81"}
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.181526 4884 pod_startup_latency_tracker.go:104]
"Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-lx9b2" podStartSLOduration=129.181510802 podStartE2EDuration="2m9.181510802s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:59.180899456 +0000 UTC m=+152.258856593" watchObservedRunningTime="2025-12-10 00:32:59.181510802 +0000 UTC m=+152.259467919" Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.192821 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-mbfs5" event={"ID":"270e1ca3-e108-43c1-be6e-cedfbbebd78c","Type":"ContainerStarted","Data":"6bcd3d4516389d54023e3aaf7d46b03f3dc40c405731a7bf43c527f0a59f5bf6"} Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.192900 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-mbfs5" event={"ID":"270e1ca3-e108-43c1-be6e-cedfbbebd78c","Type":"ContainerStarted","Data":"c5dc9edd761387eb3991aed243e03481ed0e27f8dd06b087bb34b4b828ab70e4"} Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.210694 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk" event={"ID":"7bdeeac5-9323-4934-bf1a-10bb6c8c6f86","Type":"ContainerStarted","Data":"2258cb553228fc52f3980cc24b283ae8b80987510bd24cc5c1f5740efca04c59"} Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.214389 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-mbfs5" podStartSLOduration=129.214370963 podStartE2EDuration="2m9.214370963s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:59.212843783 +0000 UTC m=+152.290800900" watchObservedRunningTime="2025-12-10 00:32:59.214370963 +0000 UTC m=+152.292328080" Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.227724 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" event={"ID":"8d422902-7c72-47b8-8c80-c8c7c63d57f7","Type":"ContainerStarted","Data":"0c1b9da2033be621c5b98ba8493683766063fc587843be2395453275bfc54242"} Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.227789 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" event={"ID":"8d422902-7c72-47b8-8c80-c8c7c63d57f7","Type":"ContainerStarted","Data":"ca2bfa2d6458cc53c8edc8875f00f2aebb97cca702e2766d1af2f8c28c5457ef"} Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.232465 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq" event={"ID":"7eef9efb-6f3c-4262-8d29-3a871fdbd304","Type":"ContainerStarted","Data":"760f2d841871a488ca843e12effe08640d893ae032ecd522317d06e94712d0d1"} Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.240472 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:32:59 crc 
kubenswrapper[4884]: E1210 00:32:59.252290 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:59.752255758 +0000 UTC m=+152.830212875 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.262614 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-xkhl5" podStartSLOduration=129.262593841 podStartE2EDuration="2m9.262593841s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:32:59.257097726 +0000 UTC m=+152.335054853" watchObservedRunningTime="2025-12-10 00:32:59.262593841 +0000 UTC m=+152.340550958" Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.341894 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-4qztz" Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.351004 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:32:59 crc kubenswrapper[4884]: E1210 00:32:59.351341 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:32:59.851328094 +0000 UTC m=+152.929285211 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.453695 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:32:59 crc kubenswrapper[4884]: E1210 00:32:59.454545 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:32:59.954522331 +0000 UTC m=+153.032479448 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.556927 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:32:59 crc kubenswrapper[4884]: E1210 00:32:59.557311 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.057286056 +0000 UTC m=+153.135243173 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.660782 4884 patch_prober.go:28] interesting pod/router-default-5444994796-dgdnq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 10 00:32:59 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld
Dec 10 00:32:59 crc kubenswrapper[4884]: [+]process-running ok
Dec 10 00:32:59 crc kubenswrapper[4884]: healthz check failed
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.661327 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dgdnq" podUID="d976ab75-284d-4613-adcd-4620ceebf209" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.661066 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:59 crc kubenswrapper[4884]: E1210 00:32:59.661151 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.161126559 +0000 UTC m=+153.239083676 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.662033 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:59 crc kubenswrapper[4884]: E1210 00:32:59.662484 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.162470174 +0000 UTC m=+153.240427291 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.765415 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:59 crc kubenswrapper[4884]: E1210 00:32:59.765895 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.265862015 +0000 UTC m=+153.343819132 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.867853 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:32:59 crc kubenswrapper[4884]: E1210 00:32:59.868626 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.36860678 +0000 UTC m=+153.446563897 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:32:59 crc kubenswrapper[4884]: I1210 00:32:59.970547 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 10 00:32:59 crc kubenswrapper[4884]: E1210 00:32:59.970911 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.470896321 +0000 UTC m=+153.548853438 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.077286 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4"
Dec 10 00:33:00 crc kubenswrapper[4884]: E1210 00:33:00.077672 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.577656042 +0000 UTC m=+153.655613159 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.178502 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:33:00 crc kubenswrapper[4884]: E1210 00:33:00.178705 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.67868312 +0000 UTC m=+153.756640237 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.179071 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:00 crc kubenswrapper[4884]: E1210 00:33:00.179344 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.679332418 +0000 UTC m=+153.757289535 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.242015 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" event={"ID":"db8164d6-52ea-4ee2-b307-0acc3cbd72a9","Type":"ContainerStarted","Data":"942d31a2ebc9e789159947fa3eccd4cd52e94c6531274973588c05e45ee61931"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.242486 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.254172 4884 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-cncdj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/healthz\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.254239 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" podUID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.26:8080/healthz\": dial tcp 10.217.0.26:8080: connect: connection refused" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.267884 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9rlgm" event={"ID":"3a10c3ea-6afa-45d0-b42e-9165a5a1b69c","Type":"ContainerStarted","Data":"f07812ab971e1638c4e2af8ca9df850dfd808f79432189c69e7d8cae1f96cba5"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.267932 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9rlgm" event={"ID":"3a10c3ea-6afa-45d0-b42e-9165a5a1b69c","Type":"ContainerStarted","Data":"029e2e13d9006b34fd6dbfa9444c634f7928dd2ea937999e808620d995e17118"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.279979 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:33:00 crc kubenswrapper[4884]: E1210 00:33:00.281144 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.781127897 +0000 UTC m=+153.859085014 (durationBeforeRetry 500ms). 
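
Note the ordering above: PLEG reports ContainerStarted for marketplace-operator, yet the readiness probe immediately fails with "connection refused". The process is running but has not yet bound 10.217.0.26:8080, so the kernel rejects the TCP connect; the probe keeps failing until the listener is up. The sketch below performs the same check a kubelet HTTP probe does, with the target copied from the log (reachable only from inside that cluster).

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        // probe target copied from the log entries above
        url := "http://10.217.0.26:8080/healthz"
        client := &http.Client{Timeout: time.Second}
        resp, err := client.Get(url)
        if err != nil {
            // before the server binds its socket this yields
            // "connect: connection refused", as in the probe output
            fmt.Println("probe failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("probe status:", resp.StatusCode)
    }
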
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.283230 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" event={"ID":"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84","Type":"ContainerStarted","Data":"c1db5762ab4498c062049a31b5b657eb72dbeddf096d15940f898e86c445b03d"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.302339 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" podStartSLOduration=130.30232499 podStartE2EDuration="2m10.30232499s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.274750028 +0000 UTC m=+153.352707145" watchObservedRunningTime="2025-12-10 00:33:00.30232499 +0000 UTC m=+153.380282107" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.314610 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f" event={"ID":"5afcb669-190b-4a5b-89cd-a4919e13488d","Type":"ContainerStarted","Data":"757a9a4fbc348f43efcd20ca81e513b8eca8a75cf1cbe3e36990132b282cf51e"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.338548 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-9rlgm" podStartSLOduration=130.338530349 podStartE2EDuration="2m10.338530349s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.304658701 +0000 UTC m=+153.382615818" watchObservedRunningTime="2025-12-10 00:33:00.338530349 +0000 UTC m=+153.416487466" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.364234 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-zk54f" podStartSLOduration=130.36421746 podStartE2EDuration="2m10.36421746s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.363903222 +0000 UTC m=+153.441860349" watchObservedRunningTime="2025-12-10 00:33:00.36421746 +0000 UTC m=+153.442174567" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.366683 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" podStartSLOduration=131.366677426 podStartE2EDuration="2m11.366677426s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.340712237 +0000 UTC m=+153.418669354" watchObservedRunningTime="2025-12-10 00:33:00.366677426 +0000 UTC 
m=+153.444634543" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.368078 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq" event={"ID":"7eef9efb-6f3c-4262-8d29-3a871fdbd304","Type":"ContainerStarted","Data":"627b096e6fdb6534e34a5cec7197afe0c3c72a6841a847dcd44c8c156916db51"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.368132 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq" event={"ID":"7eef9efb-6f3c-4262-8d29-3a871fdbd304","Type":"ContainerStarted","Data":"6bed872e122622c41e0d87a46ef70812b90779e303274c4c33686dcc2d3aa50c"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.370375 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx" event={"ID":"7a7fb0cf-3aa8-41b2-8c88-330cffb08e8b","Type":"ContainerStarted","Data":"432b9012291c8d7954914ed926c54de3b023b7172e0703e424758102567b7ed0"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.371603 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg" event={"ID":"e20684d7-d620-4c84-8883-7b849f8649bc","Type":"ContainerStarted","Data":"f3f785d15636937c9d4a1ed3744debaf9717f1edf5f5bf7ccaf8b8ef0dcf6634"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.372255 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.374273 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66" event={"ID":"1a84b97e-b00f-4665-8266-6a1f6905211d","Type":"ContainerStarted","Data":"01b3798d594ba22e23a27737ce2634855fdb3f9b4a1b7126aeedb3db6177dff2"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.374761 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.387679 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:00 crc kubenswrapper[4884]: E1210 00:33:00.389523 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.889507101 +0000 UTC m=+153.967464218 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.390840 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-d474c" event={"ID":"1555588f-4670-4be8-8e6f-3270e377d2ce","Type":"ContainerStarted","Data":"f29ac903768836b84ebcaaf4965118aba8b749f7c529a294e822e6120d9bd902"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.391032 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-d474c" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.397121 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.408385 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-zkw8s" event={"ID":"03818f02-632d-43da-ab6b-792f3628a645","Type":"ContainerStarted","Data":"57fd2382c9fc18cc6448ce04de0bccbe430cf99951f40af65eeb8eedad04f288"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.409847 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v" event={"ID":"f211a330-2aa3-4f37-b1e0-3d198d18fe14","Type":"ContainerStarted","Data":"fdeebf2a3c39dcd397d26a80917293c07047e0a51cdefcc16fc7f6b296fbc817"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.409954 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-shqxq" podStartSLOduration=130.409934342 podStartE2EDuration="2m10.409934342s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.408118604 +0000 UTC m=+153.486075721" watchObservedRunningTime="2025-12-10 00:33:00.409934342 +0000 UTC m=+153.487891459" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.412378 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp" event={"ID":"83389922-a88d-4598-9d07-83d75e94f161","Type":"ContainerStarted","Data":"b1b23cdfe18879fa0f2fb7621b4d9914f8f75cb0fc9407961713cc78a4e23b63"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.412408 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp" event={"ID":"83389922-a88d-4598-9d07-83d75e94f161","Type":"ContainerStarted","Data":"64657e3a25c6f20a4d7392dadd6c0fa42af14439be19419c9337a0459ad5dc06"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.416061 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk" event={"ID":"7bdeeac5-9323-4934-bf1a-10bb6c8c6f86","Type":"ContainerStarted","Data":"24fd8d9368a6e75ec376494beb606fb510126a846fc6c01d1969967ba1950dd7"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 
00:33:00.417292 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw" event={"ID":"836a0995-4677-4878-b028-13d90eda9a68","Type":"ContainerStarted","Data":"faebbc6faa4add5ff05e38cb2adb6cb1926537bd9494a2e3074a372de9f6815a"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.418885 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" event={"ID":"8db32703-3873-413f-b4b4-e1ab1d68abe8","Type":"ContainerStarted","Data":"fd8d27b608582581b91af86dbf11e04a12437d8f1dd10e893755a499c7234dbf"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.420675 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx" event={"ID":"d5738350-616d-4dd8-aba6-fadd8b3271ba","Type":"ContainerStarted","Data":"d5d665df13b502ed566c90708b1e9f634e0b40f6720a71f9cc22dac9e0a4a111"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.421067 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.422678 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc" event={"ID":"dce034f5-3560-4de1-8922-ae8f80ea0fac","Type":"ContainerStarted","Data":"4e8d08f2145ae81a8f499bf93b5011c67d3d4dbefdf5150bb73612a455ff6b90"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.422705 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc" event={"ID":"dce034f5-3560-4de1-8922-ae8f80ea0fac","Type":"ContainerStarted","Data":"932c8486298ec6a9d1b126f84d69cb64e1c7a8f4642217b76804fc33ad496d33"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.429321 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-m9g2t" event={"ID":"c5d91502-3f40-430c-981d-3427daa3dd4e","Type":"ContainerStarted","Data":"020618455d4f04a2d6e26de4a903d8059c238a527e22c800817b7c8ac0717416"} Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.432976 4884 patch_prober.go:28] interesting pod/downloads-7954f5f757-lx9b2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body= Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.433060 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-lx9b2" podUID="faf8c014-cf9a-4495-a008-2f56745b6fab" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.442194 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.466587 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-28hlg" podStartSLOduration=130.466568014 podStartE2EDuration="2m10.466568014s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.442706952 +0000 UTC m=+153.520664079" watchObservedRunningTime="2025-12-10 00:33:00.466568014 +0000 UTC m=+153.544525131" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.467101 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-j5mbx" podStartSLOduration=130.467094928 podStartE2EDuration="2m10.467094928s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.462297661 +0000 UTC m=+153.540254798" watchObservedRunningTime="2025-12-10 00:33:00.467094928 +0000 UTC m=+153.545052035" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.489326 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:33:00 crc kubenswrapper[4884]: E1210 00:33:00.489573 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.989536383 +0000 UTC m=+154.067493490 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.489707 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:00 crc kubenswrapper[4884]: E1210 00:33:00.492468 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:00.992425519 +0000 UTC m=+154.070382626 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.526251 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-d474c" podStartSLOduration=9.526225665 podStartE2EDuration="9.526225665s" podCreationTimestamp="2025-12-10 00:32:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.50791574 +0000 UTC m=+153.585872857" watchObservedRunningTime="2025-12-10 00:33:00.526225665 +0000 UTC m=+153.604182782" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.563485 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-zkw8s" podStartSLOduration=130.563452213 podStartE2EDuration="2m10.563452213s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.542779824 +0000 UTC m=+153.620736961" watchObservedRunningTime="2025-12-10 00:33:00.563452213 +0000 UTC m=+153.641409350" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.589340 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-d5v66" podStartSLOduration=130.589318449 podStartE2EDuration="2m10.589318449s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.564622924 +0000 UTC m=+153.642580051" watchObservedRunningTime="2025-12-10 00:33:00.589318449 +0000 UTC m=+153.667275566" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.589890 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-546pp" podStartSLOduration=130.589883013 podStartE2EDuration="2m10.589883013s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.586956506 +0000 UTC m=+153.664913623" watchObservedRunningTime="2025-12-10 00:33:00.589883013 +0000 UTC m=+153.667840130" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.594467 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:33:00 crc kubenswrapper[4884]: E1210 00:33:00.594601 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-10 00:33:01.094582008 +0000 UTC m=+154.172539125 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.594887 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:00 crc kubenswrapper[4884]: E1210 00:33:00.595408 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:01.095389959 +0000 UTC m=+154.173347076 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.625991 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-vcrdk" podStartSLOduration=130.62597282 podStartE2EDuration="2m10.62597282s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.623921236 +0000 UTC m=+153.701878353" watchObservedRunningTime="2025-12-10 00:33:00.62597282 +0000 UTC m=+153.703929937" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.662677 4884 patch_prober.go:28] interesting pod/router-default-5444994796-dgdnq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 00:33:00 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Dec 10 00:33:00 crc kubenswrapper[4884]: [+]process-running ok Dec 10 00:33:00 crc kubenswrapper[4884]: healthz check failed Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.662744 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dgdnq" podUID="d976ab75-284d-4613-adcd-4620ceebf209" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.671209 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9r4jw" podStartSLOduration=130.671191699 podStartE2EDuration="2m10.671191699s" 
podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.667891992 +0000 UTC m=+153.745849109" watchObservedRunningTime="2025-12-10 00:33:00.671191699 +0000 UTC m=+153.749148816" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.700150 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:33:00 crc kubenswrapper[4884]: E1210 00:33:00.700588 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:01.200572949 +0000 UTC m=+154.278530066 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.794452 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-f8rwc" podStartSLOduration=131.794419816 podStartE2EDuration="2m11.794419816s" podCreationTimestamp="2025-12-10 00:30:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.79301083 +0000 UTC m=+153.870967967" watchObservedRunningTime="2025-12-10 00:33:00.794419816 +0000 UTC m=+153.872376933" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.795165 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" podStartSLOduration=130.795159786 podStartE2EDuration="2m10.795159786s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.754013706 +0000 UTC m=+153.831970843" watchObservedRunningTime="2025-12-10 00:33:00.795159786 +0000 UTC m=+153.873116913" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.807305 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:00 crc kubenswrapper[4884]: E1210 00:33:00.807675 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-12-10 00:33:01.307662858 +0000 UTC m=+154.385619975 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.830835 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-dz54v" podStartSLOduration=130.830816211 podStartE2EDuration="2m10.830816211s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.82735086 +0000 UTC m=+153.905307977" watchObservedRunningTime="2025-12-10 00:33:00.830816211 +0000 UTC m=+153.908773318" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.903118 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx" podStartSLOduration=130.903103998 podStartE2EDuration="2m10.903103998s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.901686281 +0000 UTC m=+153.979643408" watchObservedRunningTime="2025-12-10 00:33:00.903103998 +0000 UTC m=+153.981061115" Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.908998 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:33:00 crc kubenswrapper[4884]: E1210 00:33:00.909341 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:01.409326923 +0000 UTC m=+154.487284040 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:00 crc kubenswrapper[4884]: I1210 00:33:00.944459 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-m9g2t" podStartSLOduration=8.944425704 podStartE2EDuration="8.944425704s" podCreationTimestamp="2025-12-10 00:32:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:00.943977002 +0000 UTC m=+154.021934119" watchObservedRunningTime="2025-12-10 00:33:00.944425704 +0000 UTC m=+154.022382821" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.012321 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:01 crc kubenswrapper[4884]: E1210 00:33:01.012907 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:01.512895449 +0000 UTC m=+154.590852566 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.114398 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:33:01 crc kubenswrapper[4884]: E1210 00:33:01.114785 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:01.614769491 +0000 UTC m=+154.692726608 (durationBeforeRetry 500ms). 
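
The pod_startup_latency_tracker lines scattered through this excerpt reduce to timestamp arithmetic: podStartSLOduration and podStartE2EDuration are observedRunningTime minus podCreationTimestamp, and firstStartedPulling/lastFinishedPulling show the Go zero time (0001-01-01) when no image pull was needed. A worked example with values from the entries above, where 00:33:00 minus 00:30:50 gives the reported 2m10s (130 s):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // layout matching the timestamps printed in this log
        const layout = "2006-01-02 15:04:05 -0700 MST"
        created, err := time.Parse(layout, "2025-12-10 00:30:50 +0000 UTC") // podCreationTimestamp
        if err != nil {
            panic(err)
        }
        running, err := time.Parse(layout, "2025-12-10 00:33:00 +0000 UTC") // observedRunningTime, truncated to the second
        if err != nil {
            panic(err)
        }
        fmt.Println("startup duration:", running.Sub(created)) // 2m10s
    }
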
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.216313 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:01 crc kubenswrapper[4884]: E1210 00:33:01.216738 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:01.716720564 +0000 UTC m=+154.794677681 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.317608 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:33:01 crc kubenswrapper[4884]: E1210 00:33:01.318111 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:01.818086621 +0000 UTC m=+154.896043738 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.318496 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:01 crc kubenswrapper[4884]: E1210 00:33:01.319062 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:01.819038927 +0000 UTC m=+154.896996044 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.419864 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:33:01 crc kubenswrapper[4884]: E1210 00:33:01.420101 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:01.920063475 +0000 UTC m=+154.998020592 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.420268 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:01 crc kubenswrapper[4884]: E1210 00:33:01.420688 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:01.920679062 +0000 UTC m=+154.998636179 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.455764 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-zkw8s" event={"ID":"03818f02-632d-43da-ab6b-792f3628a645","Type":"ContainerStarted","Data":"1535579243ce6b5321fd43f5924f185f20e8ac324a6f0146483b37c0fb1709c5"} Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.462326 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-gg998" event={"ID":"682c346d-10e9-4029-bced-73873eb7229e","Type":"ContainerStarted","Data":"d9f3355185bb25c27af22a27ffb47f81abb816b7127d2ab98f5a4e4bccba282a"} Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.462395 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-gg998" event={"ID":"682c346d-10e9-4029-bced-73873eb7229e","Type":"ContainerStarted","Data":"4c81b242f1becafd5f49c04c843d7af9122ece53ac152318abe2e75430b9bd2e"} Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.469075 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-d474c" event={"ID":"1555588f-4670-4be8-8e6f-3270e377d2ce","Type":"ContainerStarted","Data":"f8f2a20b00f872e24b61eac2c66f77cbe5b90654b149c1fb1920a137a84d1d84"} Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.470624 4884 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-cncdj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/healthz\": dial tcp 10.217.0.26:8080: connect: connection refused" start-of-body= Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.470682 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" 
podUID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.26:8080/healthz\": dial tcp 10.217.0.26:8080: connect: connection refused" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.470905 4884 patch_prober.go:28] interesting pod/downloads-7954f5f757-lx9b2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body= Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.470969 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-lx9b2" podUID="faf8c014-cf9a-4495-a008-2f56745b6fab" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.494707 4884 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.522050 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:33:01 crc kubenswrapper[4884]: E1210 00:33:01.522235 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:02.022204843 +0000 UTC m=+155.100161960 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.522945 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:01 crc kubenswrapper[4884]: E1210 00:33:01.525773 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:02.025752207 +0000 UTC m=+155.103709504 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.624770 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:33:01 crc kubenswrapper[4884]: E1210 00:33:01.625058 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-10 00:33:02.125011659 +0000 UTC m=+155.202968776 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.625187 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:01 crc kubenswrapper[4884]: E1210 00:33:01.625673 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-10 00:33:02.125661676 +0000 UTC m=+155.203618973 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-pt6z4" (UID: "40fa21aa-487a-46a9-a396-25ba52971640") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.653483 4884 patch_prober.go:28] interesting pod/router-default-5444994796-dgdnq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 00:33:01 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Dec 10 00:33:01 crc kubenswrapper[4884]: [+]process-running ok Dec 10 00:33:01 crc kubenswrapper[4884]: healthz check failed Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.653598 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dgdnq" podUID="d976ab75-284d-4613-adcd-4620ceebf209" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.653992 4884 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-12-10T00:33:01.494751025Z","Handler":null,"Name":""} Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.657524 4884 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.657613 4884 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.727621 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.742147 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.765210 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-f7d5j"] Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.766749 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.767796 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f7d5j"] Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.770985 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.830064 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.830117 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzcvw\" (UniqueName: \"kubernetes.io/projected/93607314-0a98-4da3-bfc3-5514b65e6580-kube-api-access-lzcvw\") pod \"community-operators-f7d5j\" (UID: \"93607314-0a98-4da3-bfc3-5514b65e6580\") " pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.830176 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93607314-0a98-4da3-bfc3-5514b65e6580-utilities\") pod \"community-operators-f7d5j\" (UID: \"93607314-0a98-4da3-bfc3-5514b65e6580\") " pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.830205 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93607314-0a98-4da3-bfc3-5514b65e6580-catalog-content\") pod \"community-operators-f7d5j\" (UID: \"93607314-0a98-4da3-bfc3-5514b65e6580\") " pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.834515 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.834563 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.885895 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-pt6z4\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.931368 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzcvw\" (UniqueName: \"kubernetes.io/projected/93607314-0a98-4da3-bfc3-5514b65e6580-kube-api-access-lzcvw\") pod \"community-operators-f7d5j\" (UID: \"93607314-0a98-4da3-bfc3-5514b65e6580\") " pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.931846 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93607314-0a98-4da3-bfc3-5514b65e6580-utilities\") pod \"community-operators-f7d5j\" (UID: \"93607314-0a98-4da3-bfc3-5514b65e6580\") " pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.932091 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93607314-0a98-4da3-bfc3-5514b65e6580-catalog-content\") pod \"community-operators-f7d5j\" (UID: \"93607314-0a98-4da3-bfc3-5514b65e6580\") " pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.932710 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93607314-0a98-4da3-bfc3-5514b65e6580-catalog-content\") pod \"community-operators-f7d5j\" (UID: \"93607314-0a98-4da3-bfc3-5514b65e6580\") " pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.932872 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93607314-0a98-4da3-bfc3-5514b65e6580-utilities\") pod \"community-operators-f7d5j\" (UID: \"93607314-0a98-4da3-bfc3-5514b65e6580\") " pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.934490 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sxshd"] Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.935563 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.941081 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.951998 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sxshd"] Dec 10 00:33:01 crc kubenswrapper[4884]: I1210 00:33:01.961888 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzcvw\" (UniqueName: \"kubernetes.io/projected/93607314-0a98-4da3-bfc3-5514b65e6580-kube-api-access-lzcvw\") pod \"community-operators-f7d5j\" (UID: \"93607314-0a98-4da3-bfc3-5514b65e6580\") " pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.033683 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bl2g\" (UniqueName: \"kubernetes.io/projected/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-kube-api-access-6bl2g\") pod \"certified-operators-sxshd\" (UID: \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\") " pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.033746 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-utilities\") pod \"certified-operators-sxshd\" (UID: \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\") " pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.033803 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-catalog-content\") pod \"certified-operators-sxshd\" (UID: \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\") " pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.086564 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.134241 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vclmg"] Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.135066 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bl2g\" (UniqueName: \"kubernetes.io/projected/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-kube-api-access-6bl2g\") pod \"certified-operators-sxshd\" (UID: \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\") " pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.135152 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-utilities\") pod \"certified-operators-sxshd\" (UID: \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\") " pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.135198 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-catalog-content\") pod \"certified-operators-sxshd\" (UID: \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\") " pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.135764 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vclmg" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.136421 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-catalog-content\") pod \"certified-operators-sxshd\" (UID: \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\") " pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.136491 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-utilities\") pod \"certified-operators-sxshd\" (UID: \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\") " pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.147107 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vclmg"] Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.155852 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.161152 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bl2g\" (UniqueName: \"kubernetes.io/projected/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-kube-api-access-6bl2g\") pod \"certified-operators-sxshd\" (UID: \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\") " pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.237512 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdbqd\" (UniqueName: \"kubernetes.io/projected/a5ef5504-0d0c-44d9-b007-42f470547918-kube-api-access-cdbqd\") pod \"community-operators-vclmg\" (UID: \"a5ef5504-0d0c-44d9-b007-42f470547918\") " pod="openshift-marketplace/community-operators-vclmg" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.238091 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5ef5504-0d0c-44d9-b007-42f470547918-catalog-content\") pod \"community-operators-vclmg\" (UID: \"a5ef5504-0d0c-44d9-b007-42f470547918\") " pod="openshift-marketplace/community-operators-vclmg" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.238447 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5ef5504-0d0c-44d9-b007-42f470547918-utilities\") pod \"community-operators-vclmg\" (UID: \"a5ef5504-0d0c-44d9-b007-42f470547918\") " pod="openshift-marketplace/community-operators-vclmg" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.287116 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.337622 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gbk5t"] Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.339170 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gbk5t" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.339904 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5ef5504-0d0c-44d9-b007-42f470547918-catalog-content\") pod \"community-operators-vclmg\" (UID: \"a5ef5504-0d0c-44d9-b007-42f470547918\") " pod="openshift-marketplace/community-operators-vclmg" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.340500 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5ef5504-0d0c-44d9-b007-42f470547918-catalog-content\") pod \"community-operators-vclmg\" (UID: \"a5ef5504-0d0c-44d9-b007-42f470547918\") " pod="openshift-marketplace/community-operators-vclmg" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.340505 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5ef5504-0d0c-44d9-b007-42f470547918-utilities\") pod \"community-operators-vclmg\" (UID: \"a5ef5504-0d0c-44d9-b007-42f470547918\") " pod="openshift-marketplace/community-operators-vclmg" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.339934 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5ef5504-0d0c-44d9-b007-42f470547918-utilities\") pod \"community-operators-vclmg\" (UID: \"a5ef5504-0d0c-44d9-b007-42f470547918\") " pod="openshift-marketplace/community-operators-vclmg" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.340595 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdbqd\" (UniqueName: \"kubernetes.io/projected/a5ef5504-0d0c-44d9-b007-42f470547918-kube-api-access-cdbqd\") pod \"community-operators-vclmg\" (UID: \"a5ef5504-0d0c-44d9-b007-42f470547918\") " pod="openshift-marketplace/community-operators-vclmg" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.351694 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gbk5t"] Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.367352 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdbqd\" (UniqueName: \"kubernetes.io/projected/a5ef5504-0d0c-44d9-b007-42f470547918-kube-api-access-cdbqd\") pod \"community-operators-vclmg\" (UID: \"a5ef5504-0d0c-44d9-b007-42f470547918\") " pod="openshift-marketplace/community-operators-vclmg" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.386216 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-pt6z4"] Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.441874 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-catalog-content\") pod \"certified-operators-gbk5t\" (UID: \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\") " pod="openshift-marketplace/certified-operators-gbk5t" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.442411 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vks2\" (UniqueName: \"kubernetes.io/projected/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-kube-api-access-6vks2\") pod \"certified-operators-gbk5t\" (UID: 
\"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\") " pod="openshift-marketplace/certified-operators-gbk5t" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.442494 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-utilities\") pod \"certified-operators-gbk5t\" (UID: \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\") " pod="openshift-marketplace/certified-operators-gbk5t" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.455520 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vclmg" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.480358 4884 generic.go:334] "Generic (PLEG): container finished" podID="5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84" containerID="c1db5762ab4498c062049a31b5b657eb72dbeddf096d15940f898e86c445b03d" exitCode=0 Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.480408 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" event={"ID":"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84","Type":"ContainerDied","Data":"c1db5762ab4498c062049a31b5b657eb72dbeddf096d15940f898e86c445b03d"} Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.481458 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" event={"ID":"40fa21aa-487a-46a9-a396-25ba52971640","Type":"ContainerStarted","Data":"ea70a7ecefade4ec6f517f8e7a40aca5cc675bdbe35f109daad57ca710cefca0"} Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.487502 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-gg998" event={"ID":"682c346d-10e9-4029-bced-73873eb7229e","Type":"ContainerStarted","Data":"3f95b06e8460b40555ed8cb995e63b82623397f66caf5fa48c49ed23c80bd848"} Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.487624 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-gg998" event={"ID":"682c346d-10e9-4029-bced-73873eb7229e","Type":"ContainerStarted","Data":"95f959dc6e17be69dc28f3a0aec2689c34569a43c36ab057030ebd216ecbb77f"} Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.534065 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sxshd"] Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.553005 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-gg998" podStartSLOduration=11.552987143 podStartE2EDuration="11.552987143s" podCreationTimestamp="2025-12-10 00:32:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:02.552195812 +0000 UTC m=+155.630152939" watchObservedRunningTime="2025-12-10 00:33:02.552987143 +0000 UTC m=+155.630944260" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.553539 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-utilities\") pod \"certified-operators-gbk5t\" (UID: \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\") " pod="openshift-marketplace/certified-operators-gbk5t" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.553624 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-catalog-content\") pod \"certified-operators-gbk5t\" (UID: \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\") " pod="openshift-marketplace/certified-operators-gbk5t" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.553735 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vks2\" (UniqueName: \"kubernetes.io/projected/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-kube-api-access-6vks2\") pod \"certified-operators-gbk5t\" (UID: \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\") " pod="openshift-marketplace/certified-operators-gbk5t" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.555280 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-catalog-content\") pod \"certified-operators-gbk5t\" (UID: \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\") " pod="openshift-marketplace/certified-operators-gbk5t" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.556580 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-utilities\") pod \"certified-operators-gbk5t\" (UID: \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\") " pod="openshift-marketplace/certified-operators-gbk5t" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.579455 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vks2\" (UniqueName: \"kubernetes.io/projected/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-kube-api-access-6vks2\") pod \"certified-operators-gbk5t\" (UID: \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\") " pod="openshift-marketplace/certified-operators-gbk5t" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.653680 4884 patch_prober.go:28] interesting pod/router-default-5444994796-dgdnq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 00:33:02 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Dec 10 00:33:02 crc kubenswrapper[4884]: [+]process-running ok Dec 10 00:33:02 crc kubenswrapper[4884]: healthz check failed Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.653731 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dgdnq" podUID="d976ab75-284d-4613-adcd-4620ceebf209" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.669111 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gbk5t" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.674783 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-f7d5j"] Dec 10 00:33:02 crc kubenswrapper[4884]: W1210 00:33:02.694481 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod93607314_0a98_4da3_bfc3_5514b65e6580.slice/crio-4d39ad2ee9382d1b5a3aca5460d229a105dbf99505275770381bbc8e51c992b2 WatchSource:0}: Error finding container 4d39ad2ee9382d1b5a3aca5460d229a105dbf99505275770381bbc8e51c992b2: Status 404 returned error can't find the container with id 4d39ad2ee9382d1b5a3aca5460d229a105dbf99505275770381bbc8e51c992b2 Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.837947 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vclmg"] Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.923889 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.932034 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-4d9rn" Dec 10 00:33:02 crc kubenswrapper[4884]: I1210 00:33:02.968673 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gbk5t"] Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.255083 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-6sfkf" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.294729 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.494170 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gbk5t" event={"ID":"e38b0935-5b38-43cb-b5dc-f05f458aeeb5","Type":"ContainerStarted","Data":"331a7bd6f9020b60e259fcaf35a8ef1665716c0a2d2a47a058ab56ed80c1bc24"} Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.496131 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" event={"ID":"40fa21aa-487a-46a9-a396-25ba52971640","Type":"ContainerStarted","Data":"6cf153efd5ff451c3bb34f6a9b9aa59db77830ee04761b375242a008d0c8ed72"} Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.496451 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.498399 4884 generic.go:334] "Generic (PLEG): container finished" podID="93607314-0a98-4da3-bfc3-5514b65e6580" containerID="7c5009e1d415f00004659611cbe66841d70456d125a22e85ccb5881bc02d0b56" exitCode=0 Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.498553 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f7d5j" event={"ID":"93607314-0a98-4da3-bfc3-5514b65e6580","Type":"ContainerDied","Data":"7c5009e1d415f00004659611cbe66841d70456d125a22e85ccb5881bc02d0b56"} Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.498646 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-f7d5j" event={"ID":"93607314-0a98-4da3-bfc3-5514b65e6580","Type":"ContainerStarted","Data":"4d39ad2ee9382d1b5a3aca5460d229a105dbf99505275770381bbc8e51c992b2"} Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.501161 4884 generic.go:334] "Generic (PLEG): container finished" podID="a5ef5504-0d0c-44d9-b007-42f470547918" containerID="74b43700bdf763204509449ab8b11a389cdc8f3c0998c7db1955bbd49bb512c4" exitCode=0 Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.501233 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vclmg" event={"ID":"a5ef5504-0d0c-44d9-b007-42f470547918","Type":"ContainerDied","Data":"74b43700bdf763204509449ab8b11a389cdc8f3c0998c7db1955bbd49bb512c4"} Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.501260 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vclmg" event={"ID":"a5ef5504-0d0c-44d9-b007-42f470547918","Type":"ContainerStarted","Data":"26afdc8c7a228528614d401ae128b942cff7b3f55e43bb3047434edde6b9f0a5"} Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.501364 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.504067 4884 generic.go:334] "Generic (PLEG): container finished" podID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" containerID="a288f222a79f9eebde57528a3a9c37623f3d05bdbaf17718efb94883366dade1" exitCode=0 Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.504353 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sxshd" event={"ID":"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd","Type":"ContainerDied","Data":"a288f222a79f9eebde57528a3a9c37623f3d05bdbaf17718efb94883366dade1"} Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.504406 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sxshd" event={"ID":"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd","Type":"ContainerStarted","Data":"9845a990e4d1a071978e5f247e9e578c07ad7db5a29e576137ff81540a56fe2d"} Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.541719 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" podStartSLOduration=133.541698398 podStartE2EDuration="2m13.541698398s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:03.519381306 +0000 UTC m=+156.597338433" watchObservedRunningTime="2025-12-10 00:33:03.541698398 +0000 UTC m=+156.619655515" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.653029 4884 patch_prober.go:28] interesting pod/router-default-5444994796-dgdnq container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 10 00:33:03 crc kubenswrapper[4884]: [-]has-synced failed: reason withheld Dec 10 00:33:03 crc kubenswrapper[4884]: [+]process-running ok Dec 10 00:33:03 crc kubenswrapper[4884]: healthz check failed Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.653156 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-dgdnq" podUID="d976ab75-284d-4613-adcd-4620ceebf209" containerName="router" probeResult="failure" 
output="HTTP probe failed with statuscode: 500" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.905375 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.943618 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-h6pgr"] Dec 10 00:33:03 crc kubenswrapper[4884]: E1210 00:33:03.944088 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84" containerName="collect-profiles" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.944109 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84" containerName="collect-profiles" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.944238 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84" containerName="collect-profiles" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.945213 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.947542 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.961319 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6pgr"] Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.989215 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-config-volume\") pod \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\" (UID: \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\") " Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.989368 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-secret-volume\") pod \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\" (UID: \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\") " Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.989451 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lnk28\" (UniqueName: \"kubernetes.io/projected/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-kube-api-access-lnk28\") pod \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\" (UID: \"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84\") " Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.989679 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12a04f77-054c-4a5d-80c5-2d64fcc137af-utilities\") pod \"redhat-marketplace-h6pgr\" (UID: \"12a04f77-054c-4a5d-80c5-2d64fcc137af\") " pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.989713 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12a04f77-054c-4a5d-80c5-2d64fcc137af-catalog-content\") pod \"redhat-marketplace-h6pgr\" (UID: \"12a04f77-054c-4a5d-80c5-2d64fcc137af\") " pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.989748 4884 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqngc\" (UniqueName: \"kubernetes.io/projected/12a04f77-054c-4a5d-80c5-2d64fcc137af-kube-api-access-jqngc\") pod \"redhat-marketplace-h6pgr\" (UID: \"12a04f77-054c-4a5d-80c5-2d64fcc137af\") " pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.990364 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-config-volume" (OuterVolumeSpecName: "config-volume") pod "5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84" (UID: "5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:33:03 crc kubenswrapper[4884]: I1210 00:33:03.997846 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-kube-api-access-lnk28" (OuterVolumeSpecName: "kube-api-access-lnk28") pod "5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84" (UID: "5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84"). InnerVolumeSpecName "kube-api-access-lnk28". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.005661 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84" (UID: "5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.095232 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12a04f77-054c-4a5d-80c5-2d64fcc137af-utilities\") pod \"redhat-marketplace-h6pgr\" (UID: \"12a04f77-054c-4a5d-80c5-2d64fcc137af\") " pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.095304 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12a04f77-054c-4a5d-80c5-2d64fcc137af-catalog-content\") pod \"redhat-marketplace-h6pgr\" (UID: \"12a04f77-054c-4a5d-80c5-2d64fcc137af\") " pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.095358 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqngc\" (UniqueName: \"kubernetes.io/projected/12a04f77-054c-4a5d-80c5-2d64fcc137af-kube-api-access-jqngc\") pod \"redhat-marketplace-h6pgr\" (UID: \"12a04f77-054c-4a5d-80c5-2d64fcc137af\") " pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.095515 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.095534 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lnk28\" (UniqueName: \"kubernetes.io/projected/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-kube-api-access-lnk28\") on node \"crc\" DevicePath \"\"" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.095546 4884 reconciler_common.go:293] "Volume detached for volume 
\"config-volume\" (UniqueName: \"kubernetes.io/configmap/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.096006 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12a04f77-054c-4a5d-80c5-2d64fcc137af-catalog-content\") pod \"redhat-marketplace-h6pgr\" (UID: \"12a04f77-054c-4a5d-80c5-2d64fcc137af\") " pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.097197 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12a04f77-054c-4a5d-80c5-2d64fcc137af-utilities\") pod \"redhat-marketplace-h6pgr\" (UID: \"12a04f77-054c-4a5d-80c5-2d64fcc137af\") " pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.113613 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqngc\" (UniqueName: \"kubernetes.io/projected/12a04f77-054c-4a5d-80c5-2d64fcc137af-kube-api-access-jqngc\") pod \"redhat-marketplace-h6pgr\" (UID: \"12a04f77-054c-4a5d-80c5-2d64fcc137af\") " pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.243405 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.243496 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.253221 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.262395 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.336998 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7qsx6"] Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.338240 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.349084 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7qsx6"] Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.402775 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/864f39a1-a244-429d-8974-46c6a30aefbb-utilities\") pod \"redhat-marketplace-7qsx6\" (UID: \"864f39a1-a244-429d-8974-46c6a30aefbb\") " pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.402841 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/864f39a1-a244-429d-8974-46c6a30aefbb-catalog-content\") pod \"redhat-marketplace-7qsx6\" (UID: \"864f39a1-a244-429d-8974-46c6a30aefbb\") " pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.402872 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7mb4\" (UniqueName: \"kubernetes.io/projected/864f39a1-a244-429d-8974-46c6a30aefbb-kube-api-access-g7mb4\") pod \"redhat-marketplace-7qsx6\" (UID: \"864f39a1-a244-429d-8974-46c6a30aefbb\") " pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.504771 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/864f39a1-a244-429d-8974-46c6a30aefbb-utilities\") pod \"redhat-marketplace-7qsx6\" (UID: \"864f39a1-a244-429d-8974-46c6a30aefbb\") " pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.505177 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/864f39a1-a244-429d-8974-46c6a30aefbb-catalog-content\") pod \"redhat-marketplace-7qsx6\" (UID: \"864f39a1-a244-429d-8974-46c6a30aefbb\") " pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.505226 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7mb4\" (UniqueName: \"kubernetes.io/projected/864f39a1-a244-429d-8974-46c6a30aefbb-kube-api-access-g7mb4\") pod \"redhat-marketplace-7qsx6\" (UID: \"864f39a1-a244-429d-8974-46c6a30aefbb\") " pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.505892 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/864f39a1-a244-429d-8974-46c6a30aefbb-utilities\") pod \"redhat-marketplace-7qsx6\" (UID: \"864f39a1-a244-429d-8974-46c6a30aefbb\") " pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.505909 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/864f39a1-a244-429d-8974-46c6a30aefbb-catalog-content\") pod \"redhat-marketplace-7qsx6\" (UID: \"864f39a1-a244-429d-8974-46c6a30aefbb\") " pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.507364 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-marketplace/redhat-marketplace-h6pgr"] Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.513852 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" event={"ID":"5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84","Type":"ContainerDied","Data":"6b7fba3b510ce251220c117afba98a7a7be56e0c97f00a7805caf5d816fefb81"} Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.513903 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b7fba3b510ce251220c117afba98a7a7be56e0c97f00a7805caf5d816fefb81" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.513979 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.523234 4884 generic.go:334] "Generic (PLEG): container finished" podID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" containerID="92ee8c9c946676c0e684a29843e748eb97f1e38329a2690639fb4eb0bbeba520" exitCode=0 Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.525220 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gbk5t" event={"ID":"e38b0935-5b38-43cb-b5dc-f05f458aeeb5","Type":"ContainerDied","Data":"92ee8c9c946676c0e684a29843e748eb97f1e38329a2690639fb4eb0bbeba520"} Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.534857 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7mb4\" (UniqueName: \"kubernetes.io/projected/864f39a1-a244-429d-8974-46c6a30aefbb-kube-api-access-g7mb4\") pod \"redhat-marketplace-7qsx6\" (UID: \"864f39a1-a244-429d-8974-46c6a30aefbb\") " pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.540557 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-m6pkc" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.606039 4884 patch_prober.go:28] interesting pod/downloads-7954f5f757-lx9b2 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body= Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.606108 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-lx9b2" podUID="faf8c014-cf9a-4495-a008-2f56745b6fab" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.606632 4884 patch_prober.go:28] interesting pod/downloads-7954f5f757-lx9b2 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" start-of-body= Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.606667 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-lx9b2" podUID="faf8c014-cf9a-4495-a008-2f56745b6fab" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.19:8080/\": dial tcp 10.217.0.19:8080: connect: connection refused" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.652155 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-ingress/router-default-5444994796-dgdnq" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.656921 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-dgdnq" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.661262 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.859519 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-5hrps" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.859923 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-5hrps" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.874923 4884 patch_prober.go:28] interesting pod/console-f9d7485db-5hrps container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.33:8443/health\": dial tcp 10.217.0.33:8443: connect: connection refused" start-of-body= Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.875005 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5hrps" podUID="34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" containerName="console" probeResult="failure" output="Get \"https://10.217.0.33:8443/health\": dial tcp 10.217.0.33:8443: connect: connection refused" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.907333 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.908299 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.914552 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.914710 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.936864 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.955976 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mnp4t"] Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.957186 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.960051 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.967692 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mnp4t"] Dec 10 00:33:04 crc kubenswrapper[4884]: I1210 00:33:04.971904 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.027999 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpkzz\" (UniqueName: \"kubernetes.io/projected/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-kube-api-access-vpkzz\") pod \"redhat-operators-mnp4t\" (UID: \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\") " pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.028190 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-utilities\") pod \"redhat-operators-mnp4t\" (UID: \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\") " pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.028209 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c86c0495-532e-4f3f-b338-a3bc9daa0e58-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c86c0495-532e-4f3f-b338-a3bc9daa0e58\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.028226 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-catalog-content\") pod \"redhat-operators-mnp4t\" (UID: \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\") " pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.028301 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c86c0495-532e-4f3f-b338-a3bc9daa0e58-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c86c0495-532e-4f3f-b338-a3bc9daa0e58\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.121235 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7qsx6"] Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.133170 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c86c0495-532e-4f3f-b338-a3bc9daa0e58-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c86c0495-532e-4f3f-b338-a3bc9daa0e58\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.133235 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-utilities\") pod \"redhat-operators-mnp4t\" (UID: 
\"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\") " pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.133259 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-catalog-content\") pod \"redhat-operators-mnp4t\" (UID: \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\") " pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.133300 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c86c0495-532e-4f3f-b338-a3bc9daa0e58-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c86c0495-532e-4f3f-b338-a3bc9daa0e58\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.133319 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c86c0495-532e-4f3f-b338-a3bc9daa0e58-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"c86c0495-532e-4f3f-b338-a3bc9daa0e58\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.133348 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpkzz\" (UniqueName: \"kubernetes.io/projected/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-kube-api-access-vpkzz\") pod \"redhat-operators-mnp4t\" (UID: \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\") " pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.134155 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-catalog-content\") pod \"redhat-operators-mnp4t\" (UID: \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\") " pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.134215 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-utilities\") pod \"redhat-operators-mnp4t\" (UID: \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\") " pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.165573 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c86c0495-532e-4f3f-b338-a3bc9daa0e58-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"c86c0495-532e-4f3f-b338-a3bc9daa0e58\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.165611 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpkzz\" (UniqueName: \"kubernetes.io/projected/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-kube-api-access-vpkzz\") pod \"redhat-operators-mnp4t\" (UID: \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\") " pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.238769 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.292187 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.341647 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rdvc9"] Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.347958 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.351546 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rdvc9"] Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.437968 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e6acb02-4f9e-4771-9213-955aa68cfa43-utilities\") pod \"redhat-operators-rdvc9\" (UID: \"0e6acb02-4f9e-4771-9213-955aa68cfa43\") " pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.438387 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qq8wq\" (UniqueName: \"kubernetes.io/projected/0e6acb02-4f9e-4771-9213-955aa68cfa43-kube-api-access-qq8wq\") pod \"redhat-operators-rdvc9\" (UID: \"0e6acb02-4f9e-4771-9213-955aa68cfa43\") " pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.438509 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e6acb02-4f9e-4771-9213-955aa68cfa43-catalog-content\") pod \"redhat-operators-rdvc9\" (UID: \"0e6acb02-4f9e-4771-9213-955aa68cfa43\") " pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.541445 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e6acb02-4f9e-4771-9213-955aa68cfa43-utilities\") pod \"redhat-operators-rdvc9\" (UID: \"0e6acb02-4f9e-4771-9213-955aa68cfa43\") " pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.541606 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qq8wq\" (UniqueName: \"kubernetes.io/projected/0e6acb02-4f9e-4771-9213-955aa68cfa43-kube-api-access-qq8wq\") pod \"redhat-operators-rdvc9\" (UID: \"0e6acb02-4f9e-4771-9213-955aa68cfa43\") " pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.541747 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e6acb02-4f9e-4771-9213-955aa68cfa43-catalog-content\") pod \"redhat-operators-rdvc9\" (UID: \"0e6acb02-4f9e-4771-9213-955aa68cfa43\") " pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.542041 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e6acb02-4f9e-4771-9213-955aa68cfa43-utilities\") pod \"redhat-operators-rdvc9\" (UID: \"0e6acb02-4f9e-4771-9213-955aa68cfa43\") " pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.542204 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7qsx6" 
event={"ID":"864f39a1-a244-429d-8974-46c6a30aefbb","Type":"ContainerStarted","Data":"b36dc8b37356893f95ebdf10fcfb6e6b2a53d23454831495fb5788be964ad0c5"} Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.544343 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e6acb02-4f9e-4771-9213-955aa68cfa43-catalog-content\") pod \"redhat-operators-rdvc9\" (UID: \"0e6acb02-4f9e-4771-9213-955aa68cfa43\") " pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.547866 4884 generic.go:334] "Generic (PLEG): container finished" podID="12a04f77-054c-4a5d-80c5-2d64fcc137af" containerID="b0594ec62925f87e7c0d6bc4c1a9d1f7da28a830dc99e8795bfe1e7e6a074d4d" exitCode=0 Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.548109 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6pgr" event={"ID":"12a04f77-054c-4a5d-80c5-2d64fcc137af","Type":"ContainerDied","Data":"b0594ec62925f87e7c0d6bc4c1a9d1f7da28a830dc99e8795bfe1e7e6a074d4d"} Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.548220 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6pgr" event={"ID":"12a04f77-054c-4a5d-80c5-2d64fcc137af","Type":"ContainerStarted","Data":"1ffd331c45defaa50e288327f3f6528262b3f4acdfc4a30b42d550942895844b"} Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.567291 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-dgdnq" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.592279 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qq8wq\" (UniqueName: \"kubernetes.io/projected/0e6acb02-4f9e-4771-9213-955aa68cfa43-kube-api-access-qq8wq\") pod \"redhat-operators-rdvc9\" (UID: \"0e6acb02-4f9e-4771-9213-955aa68cfa43\") " pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.678848 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.764095 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 10 00:33:05 crc kubenswrapper[4884]: I1210 00:33:05.787665 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mnp4t"] Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.058936 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rdvc9"] Dec 10 00:33:06 crc kubenswrapper[4884]: W1210 00:33:06.080480 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e6acb02_4f9e_4771_9213_955aa68cfa43.slice/crio-93ad622b64295ee43ac8e3b8db3804784d1741aca5822fd5ba97c7036ff7cdc6 WatchSource:0}: Error finding container 93ad622b64295ee43ac8e3b8db3804784d1741aca5822fd5ba97c7036ff7cdc6: Status 404 returned error can't find the container with id 93ad622b64295ee43ac8e3b8db3804784d1741aca5822fd5ba97c7036ff7cdc6 Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.299536 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.300908 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.310485 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.310741 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.313850 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.373499 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63fca722-ee06-4868-9945-9f765dcbcee6-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"63fca722-ee06-4868-9945-9f765dcbcee6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.373626 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63fca722-ee06-4868-9945-9f765dcbcee6-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"63fca722-ee06-4868-9945-9f765dcbcee6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.478361 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63fca722-ee06-4868-9945-9f765dcbcee6-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"63fca722-ee06-4868-9945-9f765dcbcee6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.478487 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63fca722-ee06-4868-9945-9f765dcbcee6-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"63fca722-ee06-4868-9945-9f765dcbcee6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.478580 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63fca722-ee06-4868-9945-9f765dcbcee6-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"63fca722-ee06-4868-9945-9f765dcbcee6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.512594 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63fca722-ee06-4868-9945-9f765dcbcee6-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"63fca722-ee06-4868-9945-9f765dcbcee6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.574011 4884 generic.go:334] "Generic (PLEG): container finished" podID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" containerID="bc7239d47d94b557e47551a10b2d9506c4905beec3271b3750f21dedbb9d605c" exitCode=0 Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.574084 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnp4t" event={"ID":"8eba9bb1-b07d-4d37-99f5-ddd952f9f681","Type":"ContainerDied","Data":"bc7239d47d94b557e47551a10b2d9506c4905beec3271b3750f21dedbb9d605c"} Dec 10 
00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.574114 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnp4t" event={"ID":"8eba9bb1-b07d-4d37-99f5-ddd952f9f681","Type":"ContainerStarted","Data":"8ee4d6a81b235cf486abc75d7d33a3b105db7c5ee1bcd68327e16f6c8f252245"} Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.583321 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c86c0495-532e-4f3f-b338-a3bc9daa0e58","Type":"ContainerStarted","Data":"fac671b7947ceb2093c5319502574c0eed3155e92deac2fb08cd627afbc03045"} Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.588615 4884 generic.go:334] "Generic (PLEG): container finished" podID="864f39a1-a244-429d-8974-46c6a30aefbb" containerID="e7b3efabf0b9726036739eced9bace6e82c860658f8d7d8394ba901e417197d2" exitCode=0 Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.588657 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7qsx6" event={"ID":"864f39a1-a244-429d-8974-46c6a30aefbb","Type":"ContainerDied","Data":"e7b3efabf0b9726036739eced9bace6e82c860658f8d7d8394ba901e417197d2"} Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.602159 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rdvc9" event={"ID":"0e6acb02-4f9e-4771-9213-955aa68cfa43","Type":"ContainerStarted","Data":"93ad622b64295ee43ac8e3b8db3804784d1741aca5822fd5ba97c7036ff7cdc6"} Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.633983 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 00:33:06 crc kubenswrapper[4884]: I1210 00:33:06.989202 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 10 00:33:07 crc kubenswrapper[4884]: W1210 00:33:07.036511 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod63fca722_ee06_4868_9945_9f765dcbcee6.slice/crio-6ec05b88993f4a04f23827ea78106baec2be8bc0e5d9134264051a8ac2bbd5fc WatchSource:0}: Error finding container 6ec05b88993f4a04f23827ea78106baec2be8bc0e5d9134264051a8ac2bbd5fc: Status 404 returned error can't find the container with id 6ec05b88993f4a04f23827ea78106baec2be8bc0e5d9134264051a8ac2bbd5fc Dec 10 00:33:07 crc kubenswrapper[4884]: I1210 00:33:07.648014 4884 generic.go:334] "Generic (PLEG): container finished" podID="c86c0495-532e-4f3f-b338-a3bc9daa0e58" containerID="1118477af47ad505f611477711d363633670bfa2c1a9ade081a93725337ac95d" exitCode=0 Dec 10 00:33:07 crc kubenswrapper[4884]: I1210 00:33:07.648368 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c86c0495-532e-4f3f-b338-a3bc9daa0e58","Type":"ContainerDied","Data":"1118477af47ad505f611477711d363633670bfa2c1a9ade081a93725337ac95d"} Dec 10 00:33:07 crc kubenswrapper[4884]: I1210 00:33:07.655804 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"63fca722-ee06-4868-9945-9f765dcbcee6","Type":"ContainerStarted","Data":"6ec05b88993f4a04f23827ea78106baec2be8bc0e5d9134264051a8ac2bbd5fc"} Dec 10 00:33:07 crc kubenswrapper[4884]: I1210 00:33:07.665495 4884 generic.go:334] "Generic (PLEG): container finished" podID="0e6acb02-4f9e-4771-9213-955aa68cfa43" 
containerID="eb96f84f23ab7ca6ebc7bda4f021a7df6f939733f5cee38b83f2e1028689d985" exitCode=0 Dec 10 00:33:07 crc kubenswrapper[4884]: I1210 00:33:07.665574 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rdvc9" event={"ID":"0e6acb02-4f9e-4771-9213-955aa68cfa43","Type":"ContainerDied","Data":"eb96f84f23ab7ca6ebc7bda4f021a7df6f939733f5cee38b83f2e1028689d985"} Dec 10 00:33:08 crc kubenswrapper[4884]: I1210 00:33:08.682079 4884 generic.go:334] "Generic (PLEG): container finished" podID="63fca722-ee06-4868-9945-9f765dcbcee6" containerID="e0cd7768091b082ad251c1be52e9bb80847ba0f6dc8dacd4615f0c7c58e9b521" exitCode=0 Dec 10 00:33:08 crc kubenswrapper[4884]: I1210 00:33:08.682338 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"63fca722-ee06-4868-9945-9f765dcbcee6","Type":"ContainerDied","Data":"e0cd7768091b082ad251c1be52e9bb80847ba0f6dc8dacd4615f0c7c58e9b521"} Dec 10 00:33:09 crc kubenswrapper[4884]: I1210 00:33:09.061687 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 00:33:09 crc kubenswrapper[4884]: I1210 00:33:09.140528 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c86c0495-532e-4f3f-b338-a3bc9daa0e58-kube-api-access\") pod \"c86c0495-532e-4f3f-b338-a3bc9daa0e58\" (UID: \"c86c0495-532e-4f3f-b338-a3bc9daa0e58\") " Dec 10 00:33:09 crc kubenswrapper[4884]: I1210 00:33:09.140639 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c86c0495-532e-4f3f-b338-a3bc9daa0e58-kubelet-dir\") pod \"c86c0495-532e-4f3f-b338-a3bc9daa0e58\" (UID: \"c86c0495-532e-4f3f-b338-a3bc9daa0e58\") " Dec 10 00:33:09 crc kubenswrapper[4884]: I1210 00:33:09.141128 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c86c0495-532e-4f3f-b338-a3bc9daa0e58-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "c86c0495-532e-4f3f-b338-a3bc9daa0e58" (UID: "c86c0495-532e-4f3f-b338-a3bc9daa0e58"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:33:09 crc kubenswrapper[4884]: I1210 00:33:09.149680 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c86c0495-532e-4f3f-b338-a3bc9daa0e58-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "c86c0495-532e-4f3f-b338-a3bc9daa0e58" (UID: "c86c0495-532e-4f3f-b338-a3bc9daa0e58"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:33:09 crc kubenswrapper[4884]: I1210 00:33:09.242506 4884 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c86c0495-532e-4f3f-b338-a3bc9daa0e58-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 10 00:33:09 crc kubenswrapper[4884]: I1210 00:33:09.242542 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c86c0495-532e-4f3f-b338-a3bc9daa0e58-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 00:33:09 crc kubenswrapper[4884]: I1210 00:33:09.652403 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:33:09 crc kubenswrapper[4884]: I1210 00:33:09.722583 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 10 00:33:09 crc kubenswrapper[4884]: I1210 00:33:09.723046 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"c86c0495-532e-4f3f-b338-a3bc9daa0e58","Type":"ContainerDied","Data":"fac671b7947ceb2093c5319502574c0eed3155e92deac2fb08cd627afbc03045"} Dec 10 00:33:09 crc kubenswrapper[4884]: I1210 00:33:09.723069 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fac671b7947ceb2093c5319502574c0eed3155e92deac2fb08cd627afbc03045" Dec 10 00:33:10 crc kubenswrapper[4884]: I1210 00:33:10.102243 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-d474c" Dec 10 00:33:10 crc kubenswrapper[4884]: I1210 00:33:10.186990 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 00:33:10 crc kubenswrapper[4884]: I1210 00:33:10.282849 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63fca722-ee06-4868-9945-9f765dcbcee6-kubelet-dir\") pod \"63fca722-ee06-4868-9945-9f765dcbcee6\" (UID: \"63fca722-ee06-4868-9945-9f765dcbcee6\") " Dec 10 00:33:10 crc kubenswrapper[4884]: I1210 00:33:10.282933 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63fca722-ee06-4868-9945-9f765dcbcee6-kube-api-access\") pod \"63fca722-ee06-4868-9945-9f765dcbcee6\" (UID: \"63fca722-ee06-4868-9945-9f765dcbcee6\") " Dec 10 00:33:10 crc kubenswrapper[4884]: I1210 00:33:10.283008 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/63fca722-ee06-4868-9945-9f765dcbcee6-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "63fca722-ee06-4868-9945-9f765dcbcee6" (UID: "63fca722-ee06-4868-9945-9f765dcbcee6"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:33:10 crc kubenswrapper[4884]: I1210 00:33:10.287948 4884 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/63fca722-ee06-4868-9945-9f765dcbcee6-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 10 00:33:10 crc kubenswrapper[4884]: I1210 00:33:10.295227 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63fca722-ee06-4868-9945-9f765dcbcee6-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "63fca722-ee06-4868-9945-9f765dcbcee6" (UID: "63fca722-ee06-4868-9945-9f765dcbcee6"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:33:10 crc kubenswrapper[4884]: I1210 00:33:10.389100 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63fca722-ee06-4868-9945-9f765dcbcee6-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 10 00:33:10 crc kubenswrapper[4884]: I1210 00:33:10.868059 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"63fca722-ee06-4868-9945-9f765dcbcee6","Type":"ContainerDied","Data":"6ec05b88993f4a04f23827ea78106baec2be8bc0e5d9134264051a8ac2bbd5fc"} Dec 10 00:33:10 crc kubenswrapper[4884]: I1210 00:33:10.868392 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ec05b88993f4a04f23827ea78106baec2be8bc0e5d9134264051a8ac2bbd5fc" Dec 10 00:33:10 crc kubenswrapper[4884]: I1210 00:33:10.868524 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 10 00:33:13 crc kubenswrapper[4884]: I1210 00:33:13.243660 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:33:13 crc kubenswrapper[4884]: I1210 00:33:13.251131 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ec324800-e820-40c0-8b51-b020075f09eb-metrics-certs\") pod \"network-metrics-daemon-ndwnl\" (UID: \"ec324800-e820-40c0-8b51-b020075f09eb\") " pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:33:13 crc kubenswrapper[4884]: I1210 00:33:13.417132 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-ndwnl" Dec 10 00:33:14 crc kubenswrapper[4884]: I1210 00:33:14.616272 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-lx9b2" Dec 10 00:33:14 crc kubenswrapper[4884]: I1210 00:33:14.870250 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-5hrps" Dec 10 00:33:14 crc kubenswrapper[4884]: I1210 00:33:14.875716 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-5hrps" Dec 10 00:33:18 crc kubenswrapper[4884]: I1210 00:33:18.099037 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:33:18 crc kubenswrapper[4884]: I1210 00:33:18.099640 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:33:22 crc kubenswrapper[4884]: I1210 00:33:22.112740 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:33:23 crc kubenswrapper[4884]: I1210 00:33:23.893285 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 10 00:33:26 crc kubenswrapper[4884]: I1210 00:33:26.046346 4884 generic.go:334] "Generic (PLEG): container finished" podID="f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be" containerID="bf90c5e073cb9a54d9007051c32418d7c4d65221e66272f4116fec8bffe1014f" exitCode=0 Dec 10 00:33:26 crc kubenswrapper[4884]: I1210 00:33:26.046462 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29422080-pq6kj" event={"ID":"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be","Type":"ContainerDied","Data":"bf90c5e073cb9a54d9007051c32418d7c4d65221e66272f4116fec8bffe1014f"} Dec 10 00:33:30 crc kubenswrapper[4884]: I1210 00:33:30.716621 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-pruner-29422080-pq6kj" Dec 10 00:33:30 crc kubenswrapper[4884]: I1210 00:33:30.875925 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rhvg\" (UniqueName: \"kubernetes.io/projected/f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be-kube-api-access-4rhvg\") pod \"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be\" (UID: \"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be\") " Dec 10 00:33:30 crc kubenswrapper[4884]: I1210 00:33:30.876105 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be-serviceca\") pod \"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be\" (UID: \"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be\") " Dec 10 00:33:30 crc kubenswrapper[4884]: I1210 00:33:30.877418 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be-serviceca" (OuterVolumeSpecName: "serviceca") pod "f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be" (UID: "f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:33:30 crc kubenswrapper[4884]: I1210 00:33:30.883874 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be-kube-api-access-4rhvg" (OuterVolumeSpecName: "kube-api-access-4rhvg") pod "f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be" (UID: "f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be"). InnerVolumeSpecName "kube-api-access-4rhvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:33:30 crc kubenswrapper[4884]: I1210 00:33:30.977983 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rhvg\" (UniqueName: \"kubernetes.io/projected/f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be-kube-api-access-4rhvg\") on node \"crc\" DevicePath \"\"" Dec 10 00:33:30 crc kubenswrapper[4884]: I1210 00:33:30.978020 4884 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be-serviceca\") on node \"crc\" DevicePath \"\"" Dec 10 00:33:31 crc kubenswrapper[4884]: I1210 00:33:31.087283 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29422080-pq6kj" event={"ID":"f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be","Type":"ContainerDied","Data":"375e27a17c6be9c177599518160f4e661ab1264ece35dca5790381b836edd3fb"} Dec 10 00:33:31 crc kubenswrapper[4884]: I1210 00:33:31.087317 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-pruner-29422080-pq6kj" Dec 10 00:33:31 crc kubenswrapper[4884]: I1210 00:33:31.087380 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="375e27a17c6be9c177599518160f4e661ab1264ece35dca5790381b836edd3fb" Dec 10 00:33:34 crc kubenswrapper[4884]: I1210 00:33:34.995382 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-k28nx" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.303860 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 10 00:33:39 crc kubenswrapper[4884]: E1210 00:33:39.304652 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be" containerName="image-pruner" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.304675 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be" containerName="image-pruner" Dec 10 00:33:39 crc kubenswrapper[4884]: E1210 00:33:39.304698 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63fca722-ee06-4868-9945-9f765dcbcee6" containerName="pruner" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.304713 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="63fca722-ee06-4868-9945-9f765dcbcee6" containerName="pruner" Dec 10 00:33:39 crc kubenswrapper[4884]: E1210 00:33:39.304740 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c86c0495-532e-4f3f-b338-a3bc9daa0e58" containerName="pruner" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.304755 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c86c0495-532e-4f3f-b338-a3bc9daa0e58" containerName="pruner" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.304945 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3e16b9b-bb1e-42e5-aa48-cc0f2e6cb8be" containerName="image-pruner" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.304977 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="63fca722-ee06-4868-9945-9f765dcbcee6" containerName="pruner" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.304996 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c86c0495-532e-4f3f-b338-a3bc9daa0e58" containerName="pruner" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.306338 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.309817 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.310227 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.323398 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.415937 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.416071 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.518005 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.518151 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.518202 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.555224 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 00:33:39 crc kubenswrapper[4884]: I1210 00:33:39.631989 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 10 00:33:42 crc kubenswrapper[4884]: E1210 00:33:42.293969 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 10 00:33:42 crc kubenswrapper[4884]: E1210 00:33:42.294364 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6bl2g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-sxshd_openshift-marketplace(89270eb2-fb6c-41ec-bed8-ff42dc28c6fd): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 00:33:42 crc kubenswrapper[4884]: E1210 00:33:42.296394 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-sxshd" podUID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.497880 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.498575 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.505533 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.599451 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7f04c080-cd3a-44c8-821e-1c9ab9baff78-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.599867 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7f04c080-cd3a-44c8-821e-1c9ab9baff78-kube-api-access\") pod \"installer-9-crc\" (UID: \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.599908 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7f04c080-cd3a-44c8-821e-1c9ab9baff78-var-lock\") pod \"installer-9-crc\" (UID: \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.707310 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7f04c080-cd3a-44c8-821e-1c9ab9baff78-kube-api-access\") pod \"installer-9-crc\" (UID: \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.707405 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7f04c080-cd3a-44c8-821e-1c9ab9baff78-var-lock\") pod \"installer-9-crc\" (UID: \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.707588 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7f04c080-cd3a-44c8-821e-1c9ab9baff78-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.707617 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7f04c080-cd3a-44c8-821e-1c9ab9baff78-kubelet-dir\") pod \"installer-9-crc\" (UID: \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.707580 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7f04c080-cd3a-44c8-821e-1c9ab9baff78-var-lock\") pod \"installer-9-crc\" (UID: \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.748617 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7f04c080-cd3a-44c8-821e-1c9ab9baff78-kube-api-access\") pod \"installer-9-crc\" (UID: 
\"7f04c080-cd3a-44c8-821e-1c9ab9baff78\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 10 00:33:44 crc kubenswrapper[4884]: I1210 00:33:44.880488 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 10 00:33:47 crc kubenswrapper[4884]: E1210 00:33:47.072733 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-sxshd" podUID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" Dec 10 00:33:47 crc kubenswrapper[4884]: E1210 00:33:47.152158 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 10 00:33:47 crc kubenswrapper[4884]: E1210 00:33:47.152325 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vpkzz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-mnp4t_openshift-marketplace(8eba9bb1-b07d-4d37-99f5-ddd952f9f681): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 00:33:47 crc kubenswrapper[4884]: E1210 00:33:47.153814 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-mnp4t" podUID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" Dec 10 00:33:48 crc kubenswrapper[4884]: I1210 00:33:48.098814 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:33:48 crc kubenswrapper[4884]: I1210 00:33:48.099293 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:33:48 crc kubenswrapper[4884]: E1210 00:33:48.558127 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-mnp4t" podUID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" Dec 10 00:33:48 crc kubenswrapper[4884]: E1210 00:33:48.653811 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 10 00:33:48 crc kubenswrapper[4884]: E1210 00:33:48.654073 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cdbqd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-vclmg_openshift-marketplace(a5ef5504-0d0c-44d9-b007-42f470547918): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 00:33:48 crc kubenswrapper[4884]: E1210 00:33:48.656711 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-vclmg" podUID="a5ef5504-0d0c-44d9-b007-42f470547918" Dec 10 00:33:48 crc kubenswrapper[4884]: 
E1210 00:33:48.658460 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 10 00:33:48 crc kubenswrapper[4884]: E1210 00:33:48.658640 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6vks2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-gbk5t_openshift-marketplace(e38b0935-5b38-43cb-b5dc-f05f458aeeb5): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 00:33:48 crc kubenswrapper[4884]: E1210 00:33:48.659911 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-gbk5t" podUID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" Dec 10 00:33:48 crc kubenswrapper[4884]: E1210 00:33:48.670883 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Dec 10 00:33:48 crc kubenswrapper[4884]: E1210 00:33:48.671092 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lzcvw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-f7d5j_openshift-marketplace(93607314-0a98-4da3-bfc3-5514b65e6580): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Dec 10 00:33:48 crc kubenswrapper[4884]: E1210 00:33:48.672324 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-f7d5j" podUID="93607314-0a98-4da3-bfc3-5514b65e6580" Dec 10 00:33:48 crc kubenswrapper[4884]: E1210 00:33:48.673018 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Dec 10 00:33:48 crc kubenswrapper[4884]: E1210 00:33:48.673104 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qq8wq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-rdvc9_openshift-marketplace(0e6acb02-4f9e-4771-9213-955aa68cfa43): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 10 00:33:48 crc kubenswrapper[4884]: E1210 00:33:48.674177 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-rdvc9" podUID="0e6acb02-4f9e-4771-9213-955aa68cfa43"
Dec 10 00:33:49 crc kubenswrapper[4884]: E1210 00:33:49.856852 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-rdvc9" podUID="0e6acb02-4f9e-4771-9213-955aa68cfa43"
Dec 10 00:33:49 crc kubenswrapper[4884]: E1210 00:33:49.857423 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-gbk5t" podUID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5"
Dec 10 00:33:49 crc kubenswrapper[4884]: E1210 00:33:49.857508 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-f7d5j" podUID="93607314-0a98-4da3-bfc3-5514b65e6580"
Dec 10 00:33:49 crc kubenswrapper[4884]: E1210 00:33:49.857686 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-vclmg" podUID="a5ef5504-0d0c-44d9-b007-42f470547918"
Dec 10 00:33:49 crc kubenswrapper[4884]: E1210 00:33:49.929260 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Dec 10 00:33:49 crc kubenswrapper[4884]: E1210 00:33:49.929481 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jqngc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-h6pgr_openshift-marketplace(12a04f77-054c-4a5d-80c5-2d64fcc137af): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 10 00:33:49 crc kubenswrapper[4884]: E1210 00:33:49.930722 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-h6pgr" podUID="12a04f77-054c-4a5d-80c5-2d64fcc137af"
Dec 10 00:33:49 crc kubenswrapper[4884]: E1210 00:33:49.973254 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Dec 10 00:33:49 crc kubenswrapper[4884]: E1210 00:33:49.973579 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g7mb4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-7qsx6_openshift-marketplace(864f39a1-a244-429d-8974-46c6a30aefbb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 10 00:33:49 crc kubenswrapper[4884]: E1210 00:33:49.974953 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-7qsx6" podUID="864f39a1-a244-429d-8974-46c6a30aefbb"
Dec 10 00:33:50 crc kubenswrapper[4884]: E1210 00:33:50.224959 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-7qsx6" podUID="864f39a1-a244-429d-8974-46c6a30aefbb"
Dec 10 00:33:50 crc kubenswrapper[4884]: E1210 00:33:50.225021 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-h6pgr" podUID="12a04f77-054c-4a5d-80c5-2d64fcc137af"
Dec 10 00:33:50 crc kubenswrapper[4884]: I1210 00:33:50.310135 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-ndwnl"]
Dec 10 00:33:50 crc kubenswrapper[4884]: I1210 00:33:50.374534 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Dec 10 00:33:50 crc kubenswrapper[4884]: I1210 00:33:50.377888 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Dec 10 00:33:51 crc kubenswrapper[4884]: I1210 00:33:51.229194 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f","Type":"ContainerStarted","Data":"d8633ab60c3ffb7b697ec5f7607186bd5e7f65ed23f0a02e3c9c12bf72cd24ea"}
Dec 10 00:33:51 crc kubenswrapper[4884]: I1210 00:33:51.229250 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f","Type":"ContainerStarted","Data":"4cf8a2f24a2185b653ffcad112d2b2d11c629425073f268d61afb70c48d94b53"}
Dec 10 00:33:51 crc kubenswrapper[4884]: I1210 00:33:51.232834 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7f04c080-cd3a-44c8-821e-1c9ab9baff78","Type":"ContainerStarted","Data":"cbfd7afe7432d5212470b8f460f6ae65c034cf10fc3533900384ff7caab9af73"}
Dec 10 00:33:51 crc kubenswrapper[4884]: I1210 00:33:51.232886 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7f04c080-cd3a-44c8-821e-1c9ab9baff78","Type":"ContainerStarted","Data":"1cbcfb23a4d7a174c25dbfe7bafa584a8cdfa9d620bc83a790b419770b5988b5"}
Dec 10 00:33:51 crc kubenswrapper[4884]: I1210 00:33:51.236072 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" event={"ID":"ec324800-e820-40c0-8b51-b020075f09eb","Type":"ContainerStarted","Data":"4f2b9532a528fd443ec1637e3f2fb8ba28f2e009b618609e28fe88b382c4ba76"}
Dec 10 00:33:51 crc kubenswrapper[4884]: I1210 00:33:51.236131 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" event={"ID":"ec324800-e820-40c0-8b51-b020075f09eb","Type":"ContainerStarted","Data":"a306fdaaca6856570571efbd945bbb9b6a6942eff8b6185a2d14bd13fc97c170"}
Dec 10 00:33:51 crc kubenswrapper[4884]: I1210 00:33:51.236148 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-ndwnl" event={"ID":"ec324800-e820-40c0-8b51-b020075f09eb","Type":"ContainerStarted","Data":"fd1c81191523e47ff68d386e55c71fe3b86a6b076b7a6d8fc1c9932c3cba1865"}
Dec 10 00:33:51 crc kubenswrapper[4884]: I1210 00:33:51.247135 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=12.247104011 podStartE2EDuration="12.247104011s" podCreationTimestamp="2025-12-10 00:33:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:51.245465886 +0000 UTC m=+204.323423013" watchObservedRunningTime="2025-12-10 00:33:51.247104011 +0000 UTC m=+204.325061138"
Dec 10 00:33:51 crc kubenswrapper[4884]: I1210 00:33:51.267872 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-ndwnl" podStartSLOduration=181.267841547 podStartE2EDuration="3m1.267841547s" podCreationTimestamp="2025-12-10 00:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:51.264025322 +0000 UTC m=+204.341982449" watchObservedRunningTime="2025-12-10 00:33:51.267841547 +0000 UTC m=+204.345798654"
Dec 10 00:33:51 crc kubenswrapper[4884]: I1210 00:33:51.285776 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=7.285749616 podStartE2EDuration="7.285749616s" podCreationTimestamp="2025-12-10 00:33:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:33:51.283461613 +0000 UTC m=+204.361418740" watchObservedRunningTime="2025-12-10 00:33:51.285749616 +0000 UTC m=+204.363706743"
Dec 10 00:33:52 crc kubenswrapper[4884]: I1210 00:33:52.248347 4884 generic.go:334] "Generic (PLEG): container finished" podID="fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f" containerID="d8633ab60c3ffb7b697ec5f7607186bd5e7f65ed23f0a02e3c9c12bf72cd24ea" exitCode=0
Dec 10 00:33:52 crc kubenswrapper[4884]: I1210 00:33:52.248473 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f","Type":"ContainerDied","Data":"d8633ab60c3ffb7b697ec5f7607186bd5e7f65ed23f0a02e3c9c12bf72cd24ea"}
Dec 10 00:33:53 crc kubenswrapper[4884]: I1210 00:33:53.592541 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 10 00:33:53 crc kubenswrapper[4884]: I1210 00:33:53.646367 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f-kube-api-access\") pod \"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f\" (UID: \"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f\") "
Dec 10 00:33:53 crc kubenswrapper[4884]: I1210 00:33:53.646501 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f-kubelet-dir\") pod \"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f\" (UID: \"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f\") "
Dec 10 00:33:53 crc kubenswrapper[4884]: I1210 00:33:53.646776 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f" (UID: "fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:33:53 crc kubenswrapper[4884]: I1210 00:33:53.655343 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f" (UID: "fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:33:53 crc kubenswrapper[4884]: I1210 00:33:53.748144 4884 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f-kubelet-dir\") on node \"crc\" DevicePath \"\""
Dec 10 00:33:53 crc kubenswrapper[4884]: I1210 00:33:53.748190 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 10 00:33:54 crc kubenswrapper[4884]: I1210 00:33:54.263553 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f","Type":"ContainerDied","Data":"4cf8a2f24a2185b653ffcad112d2b2d11c629425073f268d61afb70c48d94b53"}
Dec 10 00:33:54 crc kubenswrapper[4884]: I1210 00:33:54.263601 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4cf8a2f24a2185b653ffcad112d2b2d11c629425073f268d61afb70c48d94b53"
Dec 10 00:33:54 crc kubenswrapper[4884]: I1210 00:33:54.263659 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 10 00:34:00 crc kubenswrapper[4884]: I1210 00:34:00.302765 4884 generic.go:334] "Generic (PLEG): container finished" podID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" containerID="d1961d4d21c795e52b61b681c99e44c56c889e9aa6bf0dcaeb3aeb1158642942" exitCode=0
Dec 10 00:34:00 crc kubenswrapper[4884]: I1210 00:34:00.303386 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sxshd" event={"ID":"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd","Type":"ContainerDied","Data":"d1961d4d21c795e52b61b681c99e44c56c889e9aa6bf0dcaeb3aeb1158642942"}
Dec 10 00:34:02 crc kubenswrapper[4884]: I1210 00:34:02.955379 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qjqjf"]
Dec 10 00:34:03 crc kubenswrapper[4884]: I1210 00:34:03.321575 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sxshd" event={"ID":"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd","Type":"ContainerStarted","Data":"1a0ccb23c70ddfe5d57cee31f84a3ab3629c024c2f2689b434d21a89afc99fc2"}
Dec 10 00:34:03 crc kubenswrapper[4884]: I1210 00:34:03.323115 4884 generic.go:334] "Generic (PLEG): container finished" podID="12a04f77-054c-4a5d-80c5-2d64fcc137af" containerID="9ba2c7bb2984aba4f501b8d611d90492f663401aea53f12ae93841c700efb749" exitCode=0
Dec 10 00:34:03 crc kubenswrapper[4884]: I1210 00:34:03.323172 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6pgr" event={"ID":"12a04f77-054c-4a5d-80c5-2d64fcc137af","Type":"ContainerDied","Data":"9ba2c7bb2984aba4f501b8d611d90492f663401aea53f12ae93841c700efb749"}
Dec 10 00:34:03 crc kubenswrapper[4884]: I1210 00:34:03.341483 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f7d5j" event={"ID":"93607314-0a98-4da3-bfc3-5514b65e6580","Type":"ContainerStarted","Data":"83ddf8adbc3dce0f60845ddb5598a277aa903889b7996d3799b26af471bfa150"}
Dec 10 00:34:03 crc kubenswrapper[4884]: I1210 00:34:03.346255 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnp4t" event={"ID":"8eba9bb1-b07d-4d37-99f5-ddd952f9f681","Type":"ContainerStarted","Data":"68e777288d56b4a49c879d5123cf09286ce9e7da78d79013c209a9aee7b89d1c"}
Dec 10 00:34:03 crc kubenswrapper[4884]: I1210 00:34:03.352947 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sxshd" podStartSLOduration=3.5149026279999998 podStartE2EDuration="1m2.352895104s" podCreationTimestamp="2025-12-10 00:33:01 +0000 UTC" firstStartedPulling="2025-12-10 00:33:03.506155835 +0000 UTC m=+156.584112952" lastFinishedPulling="2025-12-10 00:34:02.344148311 +0000 UTC m=+215.422105428" observedRunningTime="2025-12-10 00:34:03.347835325 +0000 UTC m=+216.425792442" watchObservedRunningTime="2025-12-10 00:34:03.352895104 +0000 UTC m=+216.430852211"
Dec 10 00:34:04 crc kubenswrapper[4884]: I1210 00:34:04.355393 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6pgr" event={"ID":"12a04f77-054c-4a5d-80c5-2d64fcc137af","Type":"ContainerStarted","Data":"e441723387990beb0b7021b5580eb6713a43f7ca897a7199da883d6b4adfa4c5"}
Dec 10 00:34:04 crc kubenswrapper[4884]: I1210 00:34:04.357887 4884 generic.go:334] "Generic (PLEG): container finished" podID="93607314-0a98-4da3-bfc3-5514b65e6580" containerID="83ddf8adbc3dce0f60845ddb5598a277aa903889b7996d3799b26af471bfa150" exitCode=0
Dec 10 00:34:04 crc kubenswrapper[4884]: I1210 00:34:04.357953 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f7d5j" event={"ID":"93607314-0a98-4da3-bfc3-5514b65e6580","Type":"ContainerDied","Data":"83ddf8adbc3dce0f60845ddb5598a277aa903889b7996d3799b26af471bfa150"}
Dec 10 00:34:04 crc kubenswrapper[4884]: I1210 00:34:04.360276 4884 generic.go:334] "Generic (PLEG): container finished" podID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" containerID="68e777288d56b4a49c879d5123cf09286ce9e7da78d79013c209a9aee7b89d1c" exitCode=0
Dec 10 00:34:04 crc kubenswrapper[4884]: I1210 00:34:04.360341 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnp4t" event={"ID":"8eba9bb1-b07d-4d37-99f5-ddd952f9f681","Type":"ContainerDied","Data":"68e777288d56b4a49c879d5123cf09286ce9e7da78d79013c209a9aee7b89d1c"}
Dec 10 00:34:04 crc kubenswrapper[4884]: I1210 00:34:04.365091 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rdvc9" event={"ID":"0e6acb02-4f9e-4771-9213-955aa68cfa43","Type":"ContainerStarted","Data":"b10d107031a5c5a4b8cddcd77de7e0a5f69e049fe20fbebfc7ff542f1aaea25e"}
Dec 10 00:34:04 crc kubenswrapper[4884]: I1210 00:34:04.415248 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-h6pgr" podStartSLOduration=3.119396089 podStartE2EDuration="1m1.415213668s" podCreationTimestamp="2025-12-10 00:33:03 +0000 UTC" firstStartedPulling="2025-12-10 00:33:05.555661686 +0000 UTC m=+158.633618793" lastFinishedPulling="2025-12-10 00:34:03.851479255 +0000 UTC m=+216.929436372" observedRunningTime="2025-12-10 00:34:04.398977694 +0000 UTC m=+217.476934811" watchObservedRunningTime="2025-12-10 00:34:04.415213668 +0000 UTC m=+217.493170785"
Dec 10 00:34:05 crc kubenswrapper[4884]: I1210 00:34:05.378707 4884 generic.go:334] "Generic (PLEG): container finished" podID="0e6acb02-4f9e-4771-9213-955aa68cfa43" containerID="b10d107031a5c5a4b8cddcd77de7e0a5f69e049fe20fbebfc7ff542f1aaea25e" exitCode=0
Dec 10 00:34:05 crc kubenswrapper[4884]: I1210 00:34:05.378795 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rdvc9" event={"ID":"0e6acb02-4f9e-4771-9213-955aa68cfa43","Type":"ContainerDied","Data":"b10d107031a5c5a4b8cddcd77de7e0a5f69e049fe20fbebfc7ff542f1aaea25e"}
Dec 10 00:34:06 crc kubenswrapper[4884]: I1210 00:34:06.386855 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f7d5j" event={"ID":"93607314-0a98-4da3-bfc3-5514b65e6580","Type":"ContainerStarted","Data":"a4f815d25e22b0417f9a6349e8e3c7e5625772e6d0de27c5f7d4a04533715488"}
Dec 10 00:34:06 crc kubenswrapper[4884]: I1210 00:34:06.388634 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnp4t" event={"ID":"8eba9bb1-b07d-4d37-99f5-ddd952f9f681","Type":"ContainerStarted","Data":"ea165dc2a148b7c60377b990dd6439989607b283adce092d0d772c6df36b5fa3"}
Dec 10 00:34:06 crc kubenswrapper[4884]: I1210 00:34:06.392120 4884 generic.go:334] "Generic (PLEG): container finished" podID="a5ef5504-0d0c-44d9-b007-42f470547918" containerID="7d43f27995cc3d62720d86865b978449dbaa73734a5c11a2f8fe15f8e9c47b03" exitCode=0
Dec 10 00:34:06 crc kubenswrapper[4884]: I1210 00:34:06.392181 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vclmg" event={"ID":"a5ef5504-0d0c-44d9-b007-42f470547918","Type":"ContainerDied","Data":"7d43f27995cc3d62720d86865b978449dbaa73734a5c11a2f8fe15f8e9c47b03"}
Dec 10 00:34:06 crc kubenswrapper[4884]: I1210 00:34:06.397855 4884 generic.go:334] "Generic (PLEG): container finished" podID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" containerID="56f27e3fdb3e6f2616a9de0e1eeb220c7be482fdf6a6b52e609779c4c938c478" exitCode=0
Dec 10 00:34:06 crc kubenswrapper[4884]: I1210 00:34:06.397904 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gbk5t" event={"ID":"e38b0935-5b38-43cb-b5dc-f05f458aeeb5","Type":"ContainerDied","Data":"56f27e3fdb3e6f2616a9de0e1eeb220c7be482fdf6a6b52e609779c4c938c478"}
Dec 10 00:34:06 crc kubenswrapper[4884]: I1210 00:34:06.444459 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-f7d5j" podStartSLOduration=3.669307725 podStartE2EDuration="1m5.44442014s" podCreationTimestamp="2025-12-10 00:33:01 +0000 UTC" firstStartedPulling="2025-12-10 00:33:03.501007058 +0000 UTC m=+156.578964175" lastFinishedPulling="2025-12-10 00:34:05.276119473 +0000 UTC m=+218.354076590" observedRunningTime="2025-12-10 00:34:06.416772385 +0000 UTC m=+219.494729502" watchObservedRunningTime="2025-12-10 00:34:06.44442014 +0000 UTC m=+219.522377267"
Dec 10 00:34:06 crc kubenswrapper[4884]: I1210 00:34:06.483076 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mnp4t" podStartSLOduration=3.625244616 podStartE2EDuration="1m2.483053335s" podCreationTimestamp="2025-12-10 00:33:04 +0000 UTC" firstStartedPulling="2025-12-10 00:33:06.575817684 +0000 UTC m=+159.653774801" lastFinishedPulling="2025-12-10 00:34:05.433626403 +0000 UTC m=+218.511583520" observedRunningTime="2025-12-10 00:34:06.480385123 +0000 UTC m=+219.558342240" watchObservedRunningTime="2025-12-10 00:34:06.483053335 +0000 UTC m=+219.561010462"
Dec 10 00:34:10 crc kubenswrapper[4884]: I1210 00:34:10.437089 4884 generic.go:334] "Generic (PLEG): container finished" podID="864f39a1-a244-429d-8974-46c6a30aefbb" containerID="0078e58f718b9905a36c652c39bdb00236be5c2d05166a886ea09f779fc09502" exitCode=0
Dec 10 00:34:10 crc kubenswrapper[4884]: I1210 00:34:10.437309 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7qsx6" event={"ID":"864f39a1-a244-429d-8974-46c6a30aefbb","Type":"ContainerDied","Data":"0078e58f718b9905a36c652c39bdb00236be5c2d05166a886ea09f779fc09502"}
Dec 10 00:34:11 crc kubenswrapper[4884]: I1210 00:34:11.448061 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rdvc9" event={"ID":"0e6acb02-4f9e-4771-9213-955aa68cfa43","Type":"ContainerStarted","Data":"e6d3088d82f1063697f86d936085132d571701b4a94fcb2ad70838fa2320141c"}
Dec 10 00:34:11 crc kubenswrapper[4884]: I1210 00:34:11.473343 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rdvc9" podStartSLOduration=3.92138533 podStartE2EDuration="1m6.473319934s" podCreationTimestamp="2025-12-10 00:33:05 +0000 UTC" firstStartedPulling="2025-12-10 00:33:07.671742671 +0000 UTC m=+160.749699788" lastFinishedPulling="2025-12-10 00:34:10.223677275 +0000 UTC m=+223.301634392" observedRunningTime="2025-12-10 00:34:11.472349877 +0000 UTC m=+224.550307004" watchObservedRunningTime="2025-12-10 00:34:11.473319934 +0000 UTC m=+224.551277061"
Dec 10 00:34:12 crc kubenswrapper[4884]: I1210 00:34:12.157034 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-f7d5j"
Dec 10 00:34:12 crc kubenswrapper[4884]: I1210 00:34:12.157118 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-f7d5j"
Dec 10 00:34:12 crc kubenswrapper[4884]: I1210 00:34:12.287712 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sxshd"
Dec 10 00:34:12 crc kubenswrapper[4884]: I1210 00:34:12.288058 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sxshd"
Dec 10 00:34:12 crc kubenswrapper[4884]: I1210 00:34:12.351735 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sxshd"
Dec 10 00:34:12 crc kubenswrapper[4884]: I1210 00:34:12.358006 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-f7d5j"
Dec 10 00:34:12 crc kubenswrapper[4884]: I1210 00:34:12.490399 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-f7d5j"
Dec 10 00:34:12 crc kubenswrapper[4884]: I1210 00:34:12.493073 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sxshd"
Dec 10 00:34:14 crc kubenswrapper[4884]: I1210 00:34:14.263001 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-h6pgr"
Dec 10 00:34:14 crc kubenswrapper[4884]: I1210 00:34:14.263541 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-h6pgr"
Dec 10 00:34:14 crc kubenswrapper[4884]: I1210 00:34:14.309338 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-h6pgr"
Dec 10 00:34:14 crc kubenswrapper[4884]: I1210 00:34:14.513579 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-h6pgr"
Dec 10 00:34:15 crc kubenswrapper[4884]: I1210 00:34:15.295955 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mnp4t"
Dec 10 00:34:15 crc kubenswrapper[4884]: I1210 00:34:15.296328 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mnp4t"
Dec 10 00:34:15 crc kubenswrapper[4884]: I1210 00:34:15.355770 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mnp4t"
Dec 10 00:34:15 crc kubenswrapper[4884]: I1210 00:34:15.471746 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gbk5t" event={"ID":"e38b0935-5b38-43cb-b5dc-f05f458aeeb5","Type":"ContainerStarted","Data":"714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096"}
Dec 10 00:34:15 crc kubenswrapper[4884]: I1210 00:34:15.528864 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mnp4t"
Dec 10 00:34:15 crc kubenswrapper[4884]: I1210 00:34:15.680102 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rdvc9"
Dec 10 00:34:15 crc kubenswrapper[4884]: I1210 00:34:15.680160 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rdvc9"
Dec 10 00:34:16 crc kubenswrapper[4884]: I1210 00:34:16.500115 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gbk5t" podStartSLOduration=5.850684477 podStartE2EDuration="1m14.500094479s" podCreationTimestamp="2025-12-10 00:33:02 +0000 UTC" firstStartedPulling="2025-12-10 00:33:04.526508748 +0000 UTC m=+157.604465865" lastFinishedPulling="2025-12-10 00:34:13.17591875 +0000 UTC m=+226.253875867" observedRunningTime="2025-12-10 00:34:16.498271779 +0000 UTC m=+229.576228916" watchObservedRunningTime="2025-12-10 00:34:16.500094479 +0000 UTC m=+229.578051586"
Dec 10 00:34:16 crc kubenswrapper[4884]: I1210 00:34:16.723930 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rdvc9" podUID="0e6acb02-4f9e-4771-9213-955aa68cfa43" containerName="registry-server" probeResult="failure" output=<
Dec 10 00:34:16 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s
Dec 10 00:34:16 crc kubenswrapper[4884]: >
Dec 10 00:34:17 crc kubenswrapper[4884]: I1210 00:34:17.486414 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7qsx6" event={"ID":"864f39a1-a244-429d-8974-46c6a30aefbb","Type":"ContainerStarted","Data":"ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402"}
Dec 10 00:34:17 crc kubenswrapper[4884]: I1210 00:34:17.489645 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vclmg" event={"ID":"a5ef5504-0d0c-44d9-b007-42f470547918","Type":"ContainerStarted","Data":"a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07"}
Dec 10 00:34:17 crc kubenswrapper[4884]: I1210 00:34:17.514127 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7qsx6" podStartSLOduration=3.776212794 podStartE2EDuration="1m13.514095955s" podCreationTimestamp="2025-12-10 00:33:04 +0000 UTC" firstStartedPulling="2025-12-10 00:33:06.594943571 +0000 UTC m=+159.672900678" lastFinishedPulling="2025-12-10 00:34:16.332826712 +0000 UTC m=+229.410783839" observedRunningTime="2025-12-10 00:34:17.511008169 +0000 UTC m=+230.588965296" watchObservedRunningTime="2025-12-10 00:34:17.514095955 +0000 UTC m=+230.592053092"
Dec 10 00:34:17 crc kubenswrapper[4884]: I1210 00:34:17.540804 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vclmg" podStartSLOduration=2.7172538040000003 podStartE2EDuration="1m15.540780813s" podCreationTimestamp="2025-12-10 00:33:02 +0000 UTC" firstStartedPulling="2025-12-10 00:33:03.502877808 +0000 UTC m=+156.580834925" lastFinishedPulling="2025-12-10 00:34:16.326404817 +0000 UTC m=+229.404361934" observedRunningTime="2025-12-10 00:34:17.535648352 +0000 UTC m=+230.613605469" watchObservedRunningTime="2025-12-10 00:34:17.540780813 +0000 UTC m=+230.618737940"
Dec 10 00:34:18 crc kubenswrapper[4884]: I1210 00:34:18.101789 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 10 00:34:18 crc kubenswrapper[4884]: I1210 00:34:18.101887 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 10 00:34:18 crc kubenswrapper[4884]: I1210 00:34:18.101992 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx"
Dec 10 00:34:18 crc kubenswrapper[4884]: I1210 00:34:18.102952 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 10 00:34:18 crc kubenswrapper[4884]: I1210 00:34:18.103153 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7" gracePeriod=600
Dec 10 00:34:19 crc kubenswrapper[4884]: I1210 00:34:19.507227 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7" exitCode=0
Dec 10 00:34:19 crc kubenswrapper[4884]: I1210 00:34:19.507343 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7"}
Dec 10 00:34:20 crc kubenswrapper[4884]: I1210 00:34:20.516630 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"ecf042e581c7c61981c2950a871cf1332176d990f8fd754c1ccf327fef18a9db"}
Dec 10 00:34:22 crc kubenswrapper[4884]: I1210 00:34:22.456160 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vclmg"
Dec 10 00:34:22 crc kubenswrapper[4884]: I1210 00:34:22.456669 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vclmg"
Dec 10 00:34:22 crc kubenswrapper[4884]: I1210 00:34:22.527615 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vclmg"
Dec 10 00:34:22 crc kubenswrapper[4884]: I1210 00:34:22.600047 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vclmg"
Dec 10 00:34:22 crc kubenswrapper[4884]: I1210 00:34:22.669794 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gbk5t"
Dec 10 00:34:22 crc kubenswrapper[4884]: I1210 00:34:22.669855 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gbk5t"
Dec 10 00:34:22 crc kubenswrapper[4884]: I1210 00:34:22.738040 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gbk5t"
Dec 10 00:34:23 crc kubenswrapper[4884]: I1210 00:34:23.620626 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gbk5t"
Dec 10 00:34:24 crc kubenswrapper[4884]: I1210 00:34:24.142570 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vclmg"]
Dec 10 00:34:24 crc kubenswrapper[4884]: I1210 00:34:24.541588 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vclmg" podUID="a5ef5504-0d0c-44d9-b007-42f470547918" containerName="registry-server" containerID="cri-o://a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07" gracePeriod=2
Dec 10 00:34:24 crc kubenswrapper[4884]: I1210 00:34:24.662927 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7qsx6"
Dec 10 00:34:24 crc kubenswrapper[4884]: I1210 00:34:24.664335 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7qsx6"
Dec 10 00:34:24 crc kubenswrapper[4884]: I1210 00:34:24.731944 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7qsx6"
Dec 10 00:34:24 crc kubenswrapper[4884]: I1210 00:34:24.998472 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vclmg"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.074614 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdbqd\" (UniqueName: \"kubernetes.io/projected/a5ef5504-0d0c-44d9-b007-42f470547918-kube-api-access-cdbqd\") pod \"a5ef5504-0d0c-44d9-b007-42f470547918\" (UID: \"a5ef5504-0d0c-44d9-b007-42f470547918\") "
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.074754 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5ef5504-0d0c-44d9-b007-42f470547918-utilities\") pod \"a5ef5504-0d0c-44d9-b007-42f470547918\" (UID: \"a5ef5504-0d0c-44d9-b007-42f470547918\") "
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.074818 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5ef5504-0d0c-44d9-b007-42f470547918-catalog-content\") pod \"a5ef5504-0d0c-44d9-b007-42f470547918\" (UID: \"a5ef5504-0d0c-44d9-b007-42f470547918\") "
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.076526 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5ef5504-0d0c-44d9-b007-42f470547918-utilities" (OuterVolumeSpecName: "utilities") pod "a5ef5504-0d0c-44d9-b007-42f470547918" (UID: "a5ef5504-0d0c-44d9-b007-42f470547918"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.083945 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5ef5504-0d0c-44d9-b007-42f470547918-kube-api-access-cdbqd" (OuterVolumeSpecName: "kube-api-access-cdbqd") pod "a5ef5504-0d0c-44d9-b007-42f470547918" (UID: "a5ef5504-0d0c-44d9-b007-42f470547918"). InnerVolumeSpecName "kube-api-access-cdbqd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.133368 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5ef5504-0d0c-44d9-b007-42f470547918-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a5ef5504-0d0c-44d9-b007-42f470547918" (UID: "a5ef5504-0d0c-44d9-b007-42f470547918"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.144231 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gbk5t"]
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.176959 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5ef5504-0d0c-44d9-b007-42f470547918-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.177003 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdbqd\" (UniqueName: \"kubernetes.io/projected/a5ef5504-0d0c-44d9-b007-42f470547918-kube-api-access-cdbqd\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.177017 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5ef5504-0d0c-44d9-b007-42f470547918-utilities\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.550548 4884 generic.go:334] "Generic (PLEG): container finished" podID="a5ef5504-0d0c-44d9-b007-42f470547918" containerID="a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07" exitCode=0
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.550687 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vclmg"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.550710 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vclmg" event={"ID":"a5ef5504-0d0c-44d9-b007-42f470547918","Type":"ContainerDied","Data":"a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07"}
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.550798 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vclmg" event={"ID":"a5ef5504-0d0c-44d9-b007-42f470547918","Type":"ContainerDied","Data":"26afdc8c7a228528614d401ae128b942cff7b3f55e43bb3047434edde6b9f0a5"}
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.550825 4884 scope.go:117] "RemoveContainer" containerID="a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.551619 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gbk5t" podUID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" containerName="registry-server" containerID="cri-o://714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096" gracePeriod=2
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.582894 4884 scope.go:117] "RemoveContainer" containerID="7d43f27995cc3d62720d86865b978449dbaa73734a5c11a2f8fe15f8e9c47b03"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.587775 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vclmg"]
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.590471 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vclmg"]
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.614216 4884 scope.go:117] "RemoveContainer" containerID="74b43700bdf763204509449ab8b11a389cdc8f3c0998c7db1955bbd49bb512c4"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.623724 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7qsx6"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.673635 4884 scope.go:117] "RemoveContainer" containerID="a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07"
Dec 10 00:34:25 crc kubenswrapper[4884]: E1210 00:34:25.674820 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07\": container with ID starting with a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07 not found: ID does not exist" containerID="a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.674899 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07"} err="failed to get container status \"a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07\": rpc error: code = NotFound desc = could not find container \"a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07\": container with ID starting with a7cd24fcc9f8404407597eb91845645ad2f33494fda53361e620aaf5d73fff07 not found: ID does not exist"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.674957 4884 scope.go:117] "RemoveContainer" containerID="7d43f27995cc3d62720d86865b978449dbaa73734a5c11a2f8fe15f8e9c47b03"
Dec 10 00:34:25 crc kubenswrapper[4884]: E1210 00:34:25.675409 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d43f27995cc3d62720d86865b978449dbaa73734a5c11a2f8fe15f8e9c47b03\": container with ID starting with 7d43f27995cc3d62720d86865b978449dbaa73734a5c11a2f8fe15f8e9c47b03 not found: ID does not exist" containerID="7d43f27995cc3d62720d86865b978449dbaa73734a5c11a2f8fe15f8e9c47b03"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.675473 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d43f27995cc3d62720d86865b978449dbaa73734a5c11a2f8fe15f8e9c47b03"} err="failed to get container status \"7d43f27995cc3d62720d86865b978449dbaa73734a5c11a2f8fe15f8e9c47b03\": rpc error: code = NotFound desc = could not find container \"7d43f27995cc3d62720d86865b978449dbaa73734a5c11a2f8fe15f8e9c47b03\": container with ID starting with 7d43f27995cc3d62720d86865b978449dbaa73734a5c11a2f8fe15f8e9c47b03 not found: ID does not exist"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.675518 4884 scope.go:117] "RemoveContainer" containerID="74b43700bdf763204509449ab8b11a389cdc8f3c0998c7db1955bbd49bb512c4"
Dec 10 00:34:25 crc kubenswrapper[4884]: E1210 00:34:25.675998 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74b43700bdf763204509449ab8b11a389cdc8f3c0998c7db1955bbd49bb512c4\": container with ID starting with 74b43700bdf763204509449ab8b11a389cdc8f3c0998c7db1955bbd49bb512c4 not found: ID does not exist" containerID="74b43700bdf763204509449ab8b11a389cdc8f3c0998c7db1955bbd49bb512c4"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.676022 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74b43700bdf763204509449ab8b11a389cdc8f3c0998c7db1955bbd49bb512c4"} err="failed to get container status \"74b43700bdf763204509449ab8b11a389cdc8f3c0998c7db1955bbd49bb512c4\": rpc error: code = NotFound desc = could not find container \"74b43700bdf763204509449ab8b11a389cdc8f3c0998c7db1955bbd49bb512c4\": container with ID starting with 74b43700bdf763204509449ab8b11a389cdc8f3c0998c7db1955bbd49bb512c4 not found: ID does not exist"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.721055 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rdvc9"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.775483 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rdvc9"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.926779 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gbk5t"
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.995113 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-utilities\") pod \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\" (UID: \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\") "
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.995208 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-catalog-content\") pod \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\" (UID: \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\") "
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.995379 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vks2\" (UniqueName: \"kubernetes.io/projected/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-kube-api-access-6vks2\") pod \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\" (UID: \"e38b0935-5b38-43cb-b5dc-f05f458aeeb5\") "
Dec 10 00:34:25 crc kubenswrapper[4884]: I1210 00:34:25.996731 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-utilities" (OuterVolumeSpecName: "utilities") pod "e38b0935-5b38-43cb-b5dc-f05f458aeeb5" (UID: "e38b0935-5b38-43cb-b5dc-f05f458aeeb5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.003961 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-kube-api-access-6vks2" (OuterVolumeSpecName: "kube-api-access-6vks2") pod "e38b0935-5b38-43cb-b5dc-f05f458aeeb5" (UID: "e38b0935-5b38-43cb-b5dc-f05f458aeeb5"). InnerVolumeSpecName "kube-api-access-6vks2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.042136 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e38b0935-5b38-43cb-b5dc-f05f458aeeb5" (UID: "e38b0935-5b38-43cb-b5dc-f05f458aeeb5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.097146 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.097203 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vks2\" (UniqueName: \"kubernetes.io/projected/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-kube-api-access-6vks2\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.097217 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e38b0935-5b38-43cb-b5dc-f05f458aeeb5-utilities\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.567547 4884 generic.go:334] "Generic (PLEG): container finished" podID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" containerID="714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096" exitCode=0
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.567665 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gbk5t" event={"ID":"e38b0935-5b38-43cb-b5dc-f05f458aeeb5","Type":"ContainerDied","Data":"714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096"}
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.567783 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gbk5t"
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.568214 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gbk5t" event={"ID":"e38b0935-5b38-43cb-b5dc-f05f458aeeb5","Type":"ContainerDied","Data":"331a7bd6f9020b60e259fcaf35a8ef1665716c0a2d2a47a058ab56ed80c1bc24"}
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.568272 4884 scope.go:117] "RemoveContainer" containerID="714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096"
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.604927 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gbk5t"]
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.609579 4884 scope.go:117] "RemoveContainer" containerID="56f27e3fdb3e6f2616a9de0e1eeb220c7be482fdf6a6b52e609779c4c938c478"
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.611314 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gbk5t"]
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.633165 4884 scope.go:117] "RemoveContainer" containerID="92ee8c9c946676c0e684a29843e748eb97f1e38329a2690639fb4eb0bbeba520"
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.658384 4884 scope.go:117] "RemoveContainer" containerID="714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096"
Dec 10 00:34:26 crc kubenswrapper[4884]: E1210 00:34:26.659008 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096\": container with ID starting with 714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096 not found: ID does not exist" containerID="714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096"
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.659060 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096"} err="failed to get container status \"714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096\": rpc error: code = NotFound desc = could not find container \"714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096\": container with ID starting with 714d58188416afb0c0bd8fa60726e3da74102b947e0c7c69c77f6eec09368096 not found: ID does not exist"
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.659089 4884 scope.go:117] "RemoveContainer" containerID="56f27e3fdb3e6f2616a9de0e1eeb220c7be482fdf6a6b52e609779c4c938c478"
Dec 10 00:34:26 crc kubenswrapper[4884]: E1210 00:34:26.659665 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56f27e3fdb3e6f2616a9de0e1eeb220c7be482fdf6a6b52e609779c4c938c478\": container with ID starting with 56f27e3fdb3e6f2616a9de0e1eeb220c7be482fdf6a6b52e609779c4c938c478 not found: ID does not exist" containerID="56f27e3fdb3e6f2616a9de0e1eeb220c7be482fdf6a6b52e609779c4c938c478"
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.659700 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56f27e3fdb3e6f2616a9de0e1eeb220c7be482fdf6a6b52e609779c4c938c478"} err="failed to get container status \"56f27e3fdb3e6f2616a9de0e1eeb220c7be482fdf6a6b52e609779c4c938c478\": rpc error: code = NotFound desc = could not find container \"56f27e3fdb3e6f2616a9de0e1eeb220c7be482fdf6a6b52e609779c4c938c478\": container with ID starting with 56f27e3fdb3e6f2616a9de0e1eeb220c7be482fdf6a6b52e609779c4c938c478 not found: ID does not exist"
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.659719 4884 scope.go:117] "RemoveContainer" containerID="92ee8c9c946676c0e684a29843e748eb97f1e38329a2690639fb4eb0bbeba520"
Dec 10 00:34:26 crc kubenswrapper[4884]: E1210 00:34:26.660189 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92ee8c9c946676c0e684a29843e748eb97f1e38329a2690639fb4eb0bbeba520\": container with ID starting with 92ee8c9c946676c0e684a29843e748eb97f1e38329a2690639fb4eb0bbeba520 not found: ID does not exist" containerID="92ee8c9c946676c0e684a29843e748eb97f1e38329a2690639fb4eb0bbeba520"
Dec 10 00:34:26 crc kubenswrapper[4884]: I1210 00:34:26.660276 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92ee8c9c946676c0e684a29843e748eb97f1e38329a2690639fb4eb0bbeba520"} err="failed to get container status \"92ee8c9c946676c0e684a29843e748eb97f1e38329a2690639fb4eb0bbeba520\": rpc error: code = NotFound desc = could not find container \"92ee8c9c946676c0e684a29843e748eb97f1e38329a2690639fb4eb0bbeba520\": container with ID starting with 92ee8c9c946676c0e684a29843e748eb97f1e38329a2690639fb4eb0bbeba520 not found: ID does not exist"
Dec 10 00:34:27 crc kubenswrapper[4884]: I1210 00:34:27.297861 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5ef5504-0d0c-44d9-b007-42f470547918" path="/var/lib/kubelet/pods/a5ef5504-0d0c-44d9-b007-42f470547918/volumes"
Dec 10 00:34:27 crc kubenswrapper[4884]: I1210 00:34:27.299103 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" path="/var/lib/kubelet/pods/e38b0935-5b38-43cb-b5dc-f05f458aeeb5/volumes"
Dec 10 00:34:27 crc kubenswrapper[4884]: I1210 00:34:27.543381 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7qsx6"]
Dec 10 00:34:27 crc kubenswrapper[4884]: I1210 00:34:27.997734 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" podUID="4e7d9725-55d4-4230-a690-6f1a647e353d" containerName="oauth-openshift" containerID="cri-o://e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38" gracePeriod=15
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.475612 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf"
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.535879 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4e7d9725-55d4-4230-a690-6f1a647e353d-audit-dir\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536003 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-trusted-ca-bundle\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536054 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-serving-cert\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536113 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-login\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536127 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4e7d9725-55d4-4230-a690-6f1a647e353d-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536157 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-service-ca\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536199 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-audit-policies\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536271 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-provider-selection\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536320 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mw4q\" (UniqueName: \"kubernetes.io/projected/4e7d9725-55d4-4230-a690-6f1a647e353d-kube-api-access-4mw4q\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536357 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-router-certs\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536429 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-ocp-branding-template\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536506 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-session\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536582 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-error\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536620 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-cliconfig\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536665 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-idp-0-file-data\") pod \"4e7d9725-55d4-4230-a690-6f1a647e353d\" (UID: \"4e7d9725-55d4-4230-a690-6f1a647e353d\") "
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.536973 4884 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4e7d9725-55d4-4230-a690-6f1a647e353d-audit-dir\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.537490 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.539063 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.540821 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.544200 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.546130 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.546717 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.547398 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.547421 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e7d9725-55d4-4230-a690-6f1a647e353d-kube-api-access-4mw4q" (OuterVolumeSpecName: "kube-api-access-4mw4q") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "kube-api-access-4mw4q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.547601 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.547857 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.553053 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.553394 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.556263 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "4e7d9725-55d4-4230-a690-6f1a647e353d" (UID: "4e7d9725-55d4-4230-a690-6f1a647e353d"). InnerVolumeSpecName "v4-0-config-system-session".
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.590906 4884 generic.go:334] "Generic (PLEG): container finished" podID="4e7d9725-55d4-4230-a690-6f1a647e353d" containerID="e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38" exitCode=0 Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.591235 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7qsx6" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" containerName="registry-server" containerID="cri-o://ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402" gracePeriod=2 Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.591425 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.592588 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" event={"ID":"4e7d9725-55d4-4230-a690-6f1a647e353d","Type":"ContainerDied","Data":"e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38"} Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.592645 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-qjqjf" event={"ID":"4e7d9725-55d4-4230-a690-6f1a647e353d","Type":"ContainerDied","Data":"f4bb082b46f18d958cabd77a3e3b70c199c941e90aab8bc8d57d0a1e861063cc"} Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.592674 4884 scope.go:117] "RemoveContainer" containerID="e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.631273 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qjqjf"] Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.631963 4884 scope.go:117] "RemoveContainer" containerID="e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.632472 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38\": container with ID starting with e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38 not found: ID does not exist" containerID="e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.632537 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38"} err="failed to get container status \"e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38\": rpc error: code = NotFound desc = could not find container \"e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38\": container with ID starting with e7950a45a0bb2604efbec598ca5b84ed7b5391c9bca45ab68af2fbbc330c2f38 not found: ID does not exist" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.635332 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-qjqjf"] Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.638803 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.638855 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.638878 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.638898 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.638922 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.638941 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.638963 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.639045 4884 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4e7d9725-55d4-4230-a690-6f1a647e353d-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.639067 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.639088 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4mw4q\" (UniqueName: \"kubernetes.io/projected/4e7d9725-55d4-4230-a690-6f1a647e353d-kube-api-access-4mw4q\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.639109 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.639130 4884 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.639155 4884 reconciler_common.go:293] "Volume detached for volume 
\"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4e7d9725-55d4-4230-a690-6f1a647e353d-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.675153 4884 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.675744 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5ef5504-0d0c-44d9-b007-42f470547918" containerName="registry-server" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.675789 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5ef5504-0d0c-44d9-b007-42f470547918" containerName="registry-server" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.675823 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5ef5504-0d0c-44d9-b007-42f470547918" containerName="extract-utilities" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.675843 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5ef5504-0d0c-44d9-b007-42f470547918" containerName="extract-utilities" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.675875 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5ef5504-0d0c-44d9-b007-42f470547918" containerName="extract-content" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.675894 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5ef5504-0d0c-44d9-b007-42f470547918" containerName="extract-content" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.675921 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" containerName="registry-server" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.675938 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" containerName="registry-server" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.675971 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" containerName="extract-content" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.675989 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" containerName="extract-content" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.676021 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e7d9725-55d4-4230-a690-6f1a647e353d" containerName="oauth-openshift" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.676039 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e7d9725-55d4-4230-a690-6f1a647e353d" containerName="oauth-openshift" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.676067 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f" containerName="pruner" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.676086 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f" containerName="pruner" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.676114 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" containerName="extract-utilities" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.676132 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" containerName="extract-utilities" Dec 10 00:34:28 crc 
kubenswrapper[4884]: I1210 00:34:28.676413 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5ef5504-0d0c-44d9-b007-42f470547918" containerName="registry-server" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.676487 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e38b0935-5b38-43cb-b5dc-f05f458aeeb5" containerName="registry-server" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.676516 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe3aed47-0ea2-4bbc-b9ba-8e4257efed9f" containerName="pruner" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.676546 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e7d9725-55d4-4230-a690-6f1a647e353d" containerName="oauth-openshift" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.677305 4884 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.677513 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.677874 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e" gracePeriod=15 Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.677973 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8" gracePeriod=15 Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.678095 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7" gracePeriod=15 Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.678111 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa" gracePeriod=15 Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.677903 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32" gracePeriod=15 Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.679644 4884 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.679908 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.679932 4884 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.679959 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.679976 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.680006 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.680023 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.680050 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.680066 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.680099 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.680116 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.680140 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.680155 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.680384 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.680411 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.680524 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.680553 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.680624 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.680649 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 10 00:34:28 crc kubenswrapper[4884]: E1210 00:34:28.681171 4884 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.681263 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.740324 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.740603 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.740855 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.741024 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.741159 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.741305 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.741478 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.741748 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 
10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.842605 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.842662 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.842706 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.842737 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.842747 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.842779 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.842822 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.842840 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.842869 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.842883 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.842923 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.842964 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.843020 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.843020 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.843080 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:34:28 crc kubenswrapper[4884]: I1210 00:34:28.843050 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.056682 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.057842 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.058479 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.147868 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g7mb4\" (UniqueName: \"kubernetes.io/projected/864f39a1-a244-429d-8974-46c6a30aefbb-kube-api-access-g7mb4\") pod \"864f39a1-a244-429d-8974-46c6a30aefbb\" (UID: \"864f39a1-a244-429d-8974-46c6a30aefbb\") " Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.148002 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/864f39a1-a244-429d-8974-46c6a30aefbb-utilities\") pod \"864f39a1-a244-429d-8974-46c6a30aefbb\" (UID: \"864f39a1-a244-429d-8974-46c6a30aefbb\") " Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.148084 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/864f39a1-a244-429d-8974-46c6a30aefbb-catalog-content\") pod \"864f39a1-a244-429d-8974-46c6a30aefbb\" (UID: \"864f39a1-a244-429d-8974-46c6a30aefbb\") " Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.149240 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/864f39a1-a244-429d-8974-46c6a30aefbb-utilities" (OuterVolumeSpecName: "utilities") pod "864f39a1-a244-429d-8974-46c6a30aefbb" (UID: "864f39a1-a244-429d-8974-46c6a30aefbb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.155084 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/864f39a1-a244-429d-8974-46c6a30aefbb-kube-api-access-g7mb4" (OuterVolumeSpecName: "kube-api-access-g7mb4") pod "864f39a1-a244-429d-8974-46c6a30aefbb" (UID: "864f39a1-a244-429d-8974-46c6a30aefbb"). InnerVolumeSpecName "kube-api-access-g7mb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.193052 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/864f39a1-a244-429d-8974-46c6a30aefbb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "864f39a1-a244-429d-8974-46c6a30aefbb" (UID: "864f39a1-a244-429d-8974-46c6a30aefbb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.250519 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/864f39a1-a244-429d-8974-46c6a30aefbb-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.250702 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g7mb4\" (UniqueName: \"kubernetes.io/projected/864f39a1-a244-429d-8974-46c6a30aefbb-kube-api-access-g7mb4\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.250797 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/864f39a1-a244-429d-8974-46c6a30aefbb-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.300171 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e7d9725-55d4-4230-a690-6f1a647e353d" path="/var/lib/kubelet/pods/4e7d9725-55d4-4230-a690-6f1a647e353d/volumes" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.599263 4884 generic.go:334] "Generic (PLEG): container finished" podID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" containerID="cbfd7afe7432d5212470b8f460f6ae65c034cf10fc3533900384ff7caab9af73" exitCode=0 Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.599345 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7f04c080-cd3a-44c8-821e-1c9ab9baff78","Type":"ContainerDied","Data":"cbfd7afe7432d5212470b8f460f6ae65c034cf10fc3533900384ff7caab9af73"} Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.600194 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.600674 4884 status_manager.go:851] "Failed to get status for pod" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.602490 4884 generic.go:334] "Generic (PLEG): container finished" podID="864f39a1-a244-429d-8974-46c6a30aefbb" containerID="ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402" exitCode=0 Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.602559 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7qsx6" event={"ID":"864f39a1-a244-429d-8974-46c6a30aefbb","Type":"ContainerDied","Data":"ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402"} Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.602598 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7qsx6" event={"ID":"864f39a1-a244-429d-8974-46c6a30aefbb","Type":"ContainerDied","Data":"b36dc8b37356893f95ebdf10fcfb6e6b2a53d23454831495fb5788be964ad0c5"} Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.602621 4884 scope.go:117] "RemoveContainer" containerID="ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402" Dec 10 00:34:29 
crc kubenswrapper[4884]: I1210 00:34:29.602795 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7qsx6" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.603854 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.604167 4884 status_manager.go:851] "Failed to get status for pod" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.606852 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.608727 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.609723 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.610366 4884 status_manager.go:851] "Failed to get status for pod" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.610762 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32" exitCode=0 Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.610787 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7" exitCode=0 Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.610797 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8" exitCode=0 Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.610815 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa" exitCode=2 Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.621839 4884 scope.go:117] "RemoveContainer" containerID="0078e58f718b9905a36c652c39bdb00236be5c2d05166a886ea09f779fc09502" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.638834 4884 scope.go:117] "RemoveContainer" 
containerID="e7b3efabf0b9726036739eced9bace6e82c860658f8d7d8394ba901e417197d2" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.659276 4884 scope.go:117] "RemoveContainer" containerID="ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402" Dec 10 00:34:29 crc kubenswrapper[4884]: E1210 00:34:29.660010 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402\": container with ID starting with ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402 not found: ID does not exist" containerID="ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.660068 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402"} err="failed to get container status \"ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402\": rpc error: code = NotFound desc = could not find container \"ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402\": container with ID starting with ac7b621f3b13ebf233f6e6716f734c042ff09ca44747b3ad8f43013d03fa4402 not found: ID does not exist" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.660110 4884 scope.go:117] "RemoveContainer" containerID="0078e58f718b9905a36c652c39bdb00236be5c2d05166a886ea09f779fc09502" Dec 10 00:34:29 crc kubenswrapper[4884]: E1210 00:34:29.660733 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0078e58f718b9905a36c652c39bdb00236be5c2d05166a886ea09f779fc09502\": container with ID starting with 0078e58f718b9905a36c652c39bdb00236be5c2d05166a886ea09f779fc09502 not found: ID does not exist" containerID="0078e58f718b9905a36c652c39bdb00236be5c2d05166a886ea09f779fc09502" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.660799 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0078e58f718b9905a36c652c39bdb00236be5c2d05166a886ea09f779fc09502"} err="failed to get container status \"0078e58f718b9905a36c652c39bdb00236be5c2d05166a886ea09f779fc09502\": rpc error: code = NotFound desc = could not find container \"0078e58f718b9905a36c652c39bdb00236be5c2d05166a886ea09f779fc09502\": container with ID starting with 0078e58f718b9905a36c652c39bdb00236be5c2d05166a886ea09f779fc09502 not found: ID does not exist" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.660839 4884 scope.go:117] "RemoveContainer" containerID="e7b3efabf0b9726036739eced9bace6e82c860658f8d7d8394ba901e417197d2" Dec 10 00:34:29 crc kubenswrapper[4884]: E1210 00:34:29.661161 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7b3efabf0b9726036739eced9bace6e82c860658f8d7d8394ba901e417197d2\": container with ID starting with e7b3efabf0b9726036739eced9bace6e82c860658f8d7d8394ba901e417197d2 not found: ID does not exist" containerID="e7b3efabf0b9726036739eced9bace6e82c860658f8d7d8394ba901e417197d2" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.661213 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7b3efabf0b9726036739eced9bace6e82c860658f8d7d8394ba901e417197d2"} err="failed to get container status \"e7b3efabf0b9726036739eced9bace6e82c860658f8d7d8394ba901e417197d2\": rpc error: code = 
NotFound desc = could not find container \"e7b3efabf0b9726036739eced9bace6e82c860658f8d7d8394ba901e417197d2\": container with ID starting with e7b3efabf0b9726036739eced9bace6e82c860658f8d7d8394ba901e417197d2 not found: ID does not exist" Dec 10 00:34:29 crc kubenswrapper[4884]: I1210 00:34:29.661240 4884 scope.go:117] "RemoveContainer" containerID="e46c68d3a0094348ebcbe7a56919c7b5faea5686f4ebd8ada7014d7e64377dc4" Dec 10 00:34:30 crc kubenswrapper[4884]: I1210 00:34:30.627069 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.103389 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.104594 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused" Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.105065 4884 status_manager.go:851] "Failed to get status for pod" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.113216 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.114086 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.114421 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.114797 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.115158 4884 status_manager.go:851] "Failed to get status for pod" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.192159 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.192251 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7f04c080-cd3a-44c8-821e-1c9ab9baff78-var-lock\") pod \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\" (UID: \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\") "
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.192318 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.192464 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7f04c080-cd3a-44c8-821e-1c9ab9baff78-kube-api-access\") pod \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\" (UID: \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\") "
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.192564 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7f04c080-cd3a-44c8-821e-1c9ab9baff78-kubelet-dir\") pod \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\" (UID: \"7f04c080-cd3a-44c8-821e-1c9ab9baff78\") "
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.192610 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.192790 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.192852 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7f04c080-cd3a-44c8-821e-1c9ab9baff78-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7f04c080-cd3a-44c8-821e-1c9ab9baff78" (UID: "7f04c080-cd3a-44c8-821e-1c9ab9baff78"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.192887 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7f04c080-cd3a-44c8-821e-1c9ab9baff78-var-lock" (OuterVolumeSpecName: "var-lock") pod "7f04c080-cd3a-44c8-821e-1c9ab9baff78" (UID: "7f04c080-cd3a-44c8-821e-1c9ab9baff78"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.192942 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.192996 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.194047 4884 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7f04c080-cd3a-44c8-821e-1c9ab9baff78-kubelet-dir\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.194091 4884 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.194105 4884 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.194115 4884 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/7f04c080-cd3a-44c8-821e-1c9ab9baff78-var-lock\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.194126 4884 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.199882 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f04c080-cd3a-44c8-821e-1c9ab9baff78-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7f04c080-cd3a-44c8-821e-1c9ab9baff78" (UID: "7f04c080-cd3a-44c8-821e-1c9ab9baff78"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.295864 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7f04c080-cd3a-44c8-821e-1c9ab9baff78-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.299277 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.641491 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.642824 4884 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e" exitCode=0
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.642933 4884 scope.go:117] "RemoveContainer" containerID="a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.643093 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.644241 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.645027 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.645758 4884 status_manager.go:851] "Failed to get status for pod" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.646596 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"7f04c080-cd3a-44c8-821e-1c9ab9baff78","Type":"ContainerDied","Data":"1cbcfb23a4d7a174c25dbfe7bafa584a8cdfa9d620bc83a790b419770b5988b5"}
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.646652 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1cbcfb23a4d7a174c25dbfe7bafa584a8cdfa9d620bc83a790b419770b5988b5"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.646738 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.649279 4884 status_manager.go:851] "Failed to get status for pod" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.649854 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.650535 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.655498 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.656019 4884 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.656532 4884 status_manager.go:851] "Failed to get status for pod" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.668633 4884 scope.go:117] "RemoveContainer" containerID="3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.691890 4884 scope.go:117] "RemoveContainer" containerID="6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.725042 4884 scope.go:117] "RemoveContainer" containerID="dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.751106 4884 scope.go:117] "RemoveContainer" containerID="94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.774665 4884 scope.go:117] "RemoveContainer" containerID="88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.796956 4884 scope.go:117] "RemoveContainer" containerID="a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32"
Dec 10 00:34:31 crc kubenswrapper[4884]: E1210 00:34:31.797572 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\": container with ID starting with a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32 not found: ID does not exist" containerID="a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.797641 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32"} err="failed to get container status \"a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\": rpc error: code = NotFound desc = could not find container \"a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32\": container with ID starting with a7765b05b63ffd8949d563d41f1de4bc577242fb9720bea6b3be00806987bc32 not found: ID does not exist"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.797688 4884 scope.go:117] "RemoveContainer" containerID="3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7"
Dec 10 00:34:31 crc kubenswrapper[4884]: E1210 00:34:31.798237 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\": container with ID starting with 3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7 not found: ID does not exist" containerID="3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.798292 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7"} err="failed to get container status \"3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\": rpc error: code = NotFound desc = could not find container \"3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7\": container with ID starting with 3c7d46b1aea3bf17a0ddf0aeeaa149e7859d76285529759ccfb26576764b35e7 not found: ID does not exist"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.798330 4884 scope.go:117] "RemoveContainer" containerID="6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8"
Dec 10 00:34:31 crc kubenswrapper[4884]: E1210 00:34:31.798768 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\": container with ID starting with 6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8 not found: ID does not exist" containerID="6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.798798 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8"} err="failed to get container status \"6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\": rpc error: code = NotFound desc = could not find container \"6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8\": container with ID starting with 6f6078c0e4c7aa02c1467475639cb90a82f4120d6198138b136797bdab66d9e8 not found: ID does not exist"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.798816 4884 scope.go:117] "RemoveContainer" containerID="dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa"
Dec 10 00:34:31 crc kubenswrapper[4884]: E1210 00:34:31.799232 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\": container with ID starting with dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa not found: ID does not exist" containerID="dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.799290 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa"} err="failed to get container status \"dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\": rpc error: code = NotFound desc = could not find container \"dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa\": container with ID starting with dd06c2ce83b7b3166032c6cd0d4d409cab14f221042fa0f3b159ccf052b001aa not found: ID does not exist"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.799309 4884 scope.go:117] "RemoveContainer" containerID="94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e"
Dec 10 00:34:31 crc kubenswrapper[4884]: E1210 00:34:31.800018 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\": container with ID starting with 94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e not found: ID does not exist" containerID="94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.800064 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e"} err="failed to get container status \"94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\": rpc error: code = NotFound desc = could not find container \"94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e\": container with ID starting with 94a70f8343fbe9454a88ad91be492e04c13776dcb5f1e9a7e67606fc51c4bf5e not found: ID does not exist"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.800090 4884 scope.go:117] "RemoveContainer" containerID="88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f"
Dec 10 00:34:31 crc kubenswrapper[4884]: E1210 00:34:31.800857 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\": container with ID starting with 88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f not found: ID does not exist" containerID="88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f"
Dec 10 00:34:31 crc kubenswrapper[4884]: I1210 00:34:31.800888 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f"} err="failed to get container status \"88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\": rpc error: code = NotFound desc = could not find container \"88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f\": container with ID starting with 88f088504a907813ecb2a02a792a433c6359971f1cb9424c9abffa1adad3ad2f not found: ID does not exist"
Dec 10 00:34:32 crc kubenswrapper[4884]: E1210 00:34:32.142884 4884 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:32 crc kubenswrapper[4884]: E1210 00:34:32.143264 4884 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:32 crc kubenswrapper[4884]: E1210 00:34:32.143552 4884 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:32 crc kubenswrapper[4884]: E1210 00:34:32.143818 4884 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:32 crc kubenswrapper[4884]: E1210 00:34:32.144067 4884 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:32 crc kubenswrapper[4884]: I1210 00:34:32.144095 4884 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Dec 10 00:34:32 crc kubenswrapper[4884]: E1210 00:34:32.145131 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="200ms"
Dec 10 00:34:32 crc kubenswrapper[4884]: E1210 00:34:32.345876 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="400ms"
Dec 10 00:34:32 crc kubenswrapper[4884]: E1210 00:34:32.747580 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="800ms"
Dec 10 00:34:33 crc kubenswrapper[4884]: E1210 00:34:33.548723 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="1.6s"
Dec 10 00:34:33 crc kubenswrapper[4884]: E1210 00:34:33.738305 4884 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.58:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 10 00:34:33 crc kubenswrapper[4884]: I1210 00:34:33.738906 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 10 00:34:33 crc kubenswrapper[4884]: W1210 00:34:33.773290 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-f8051350c881296b18684168d933e21a9a66e6ef54e9a1fdc6c361550962c781 WatchSource:0}: Error finding container f8051350c881296b18684168d933e21a9a66e6ef54e9a1fdc6c361550962c781: Status 404 returned error can't find the container with id f8051350c881296b18684168d933e21a9a66e6ef54e9a1fdc6c361550962c781
Dec 10 00:34:33 crc kubenswrapper[4884]: E1210 00:34:33.779311 4884 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.58:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187fb37047f71b71 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-10 00:34:33.777609585 +0000 UTC m=+246.855566742,LastTimestamp:2025-12-10 00:34:33.777609585 +0000 UTC m=+246.855566742,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Dec 10 00:34:34 crc kubenswrapper[4884]: I1210 00:34:34.680691 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"c294d99ee418a00e3f071f3f078a5c0e3eed0b58b2dac452bc97a4e4bc6c9659"}
Dec 10 00:34:34 crc kubenswrapper[4884]: I1210 00:34:34.681944 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"f8051350c881296b18684168d933e21a9a66e6ef54e9a1fdc6c361550962c781"}
Dec 10 00:34:34 crc kubenswrapper[4884]: I1210 00:34:34.682702 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:34 crc kubenswrapper[4884]: E1210 00:34:34.682901 4884 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.58:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 10 00:34:34 crc kubenswrapper[4884]: I1210 00:34:34.683131 4884 status_manager.go:851] "Failed to get status for pod" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:35 crc kubenswrapper[4884]: E1210 00:34:35.149550 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="3.2s"
Dec 10 00:34:37 crc kubenswrapper[4884]: I1210 00:34:37.294705 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:37 crc kubenswrapper[4884]: I1210 00:34:37.295689 4884 status_manager.go:851] "Failed to get status for pod" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:38 crc kubenswrapper[4884]: E1210 00:34:38.081672 4884 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.58:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187fb37047f71b71 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-10 00:34:33.777609585 +0000 UTC m=+246.855566742,LastTimestamp:2025-12-10 00:34:33.777609585 +0000 UTC m=+246.855566742,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Dec 10 00:34:38 crc kubenswrapper[4884]: E1210 00:34:38.351490 4884 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.58:6443: connect: connection refused" interval="6.4s"
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.286480 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.287636 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.288334 4884 status_manager.go:851] "Failed to get status for pod" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.317951 4884 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="9b93f82f-1fc6-48da-9c2b-12b05ead2f08"
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.317992 4884 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="9b93f82f-1fc6-48da-9c2b-12b05ead2f08"
Dec 10 00:34:40 crc kubenswrapper[4884]: E1210 00:34:40.318558 4884 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.319108 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:34:40 crc kubenswrapper[4884]: W1210 00:34:40.337524 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-d2817c7673a0d743768873cf4d27b0d686a1cebe98497d49375e751641efe343 WatchSource:0}: Error finding container d2817c7673a0d743768873cf4d27b0d686a1cebe98497d49375e751641efe343: Status 404 returned error can't find the container with id d2817c7673a0d743768873cf4d27b0d686a1cebe98497d49375e751641efe343
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.725853 4884 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="2c070afba961d00b3e4f8187bd8227f0e992513d1ed74309e7d4386bf6e9f630" exitCode=0
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.725989 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"2c070afba961d00b3e4f8187bd8227f0e992513d1ed74309e7d4386bf6e9f630"}
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.726323 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d2817c7673a0d743768873cf4d27b0d686a1cebe98497d49375e751641efe343"}
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.726939 4884 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="9b93f82f-1fc6-48da-9c2b-12b05ead2f08"
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.726965 4884 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="9b93f82f-1fc6-48da-9c2b-12b05ead2f08"
Dec 10 00:34:40 crc kubenswrapper[4884]: E1210 00:34:40.727570 4884 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.58:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.728282 4884 status_manager.go:851] "Failed to get status for pod" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" pod="openshift-marketplace/redhat-marketplace-7qsx6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7qsx6\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:40 crc kubenswrapper[4884]: I1210 00:34:40.728914 4884 status_manager.go:851] "Failed to get status for pod" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.58:6443: connect: connection refused"
Dec 10 00:34:41 crc kubenswrapper[4884]: I1210 00:34:41.736918 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Dec 10 00:34:41 crc kubenswrapper[4884]: I1210 00:34:41.737154 4884 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e" exitCode=1
Dec 10 00:34:41 crc kubenswrapper[4884]: I1210 00:34:41.737200 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e"}
Dec 10 00:34:41 crc kubenswrapper[4884]: I1210 00:34:41.737715 4884 scope.go:117] "RemoveContainer" containerID="a04ebcc478acc40735f6980b205dc9a9b17371006da3ece76944f312a71cd41e"
Dec 10 00:34:41 crc kubenswrapper[4884]: I1210 00:34:41.747138 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"7f52e99be765106a56838326e91eb8b45bdd1f5f315b84140c9a615695c776db"}
Dec 10 00:34:41 crc kubenswrapper[4884]: I1210 00:34:41.747192 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"ed35befaca54f103467e6afb572bd6d057a7ce8e5158cb2af223d7076b0b14dc"}
Dec 10 00:34:41 crc kubenswrapper[4884]: I1210 00:34:41.747208 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c3ceb439e4d1194705c68fd2bd83a9e5af0bd08223b0b3e8b8b80bf2a3639dec"}
Dec 10 00:34:42 crc kubenswrapper[4884]: I1210 00:34:42.754659 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Dec 10 00:34:42 crc kubenswrapper[4884]: I1210 00:34:42.754773 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a065981cd54b846ab3777825e96d2f5296c8197e6b4caef5d852af5cb5d09952"}
Dec 10 00:34:42 crc kubenswrapper[4884]: I1210 00:34:42.757546 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2e71ab8d9f70bfac9840dfcd0636dcde6a6613c575d0c178d1a5627d053957fc"}
Dec 10 00:34:42 crc kubenswrapper[4884]: I1210 00:34:42.757587 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"563d7272dda81e37fa76276fe0ac7c8522c48fe50edaf48503087cd737680b6b"}
Dec 10 00:34:42 crc kubenswrapper[4884]: I1210 00:34:42.757736 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:34:42 crc kubenswrapper[4884]: I1210 00:34:42.757863 4884 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="9b93f82f-1fc6-48da-9c2b-12b05ead2f08"
Dec 10 00:34:42 crc kubenswrapper[4884]: I1210 00:34:42.757897 4884 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="9b93f82f-1fc6-48da-9c2b-12b05ead2f08"
Dec 10 00:34:45 crc kubenswrapper[4884]: I1210 00:34:45.319773 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:34:45 crc kubenswrapper[4884]: I1210 00:34:45.320365 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:34:45 crc kubenswrapper[4884]: I1210 00:34:45.329374 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:34:47 crc kubenswrapper[4884]: I1210 00:34:47.023620 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 10 00:34:47 crc kubenswrapper[4884]: I1210 00:34:47.766205 4884 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:34:47 crc kubenswrapper[4884]: I1210 00:34:47.788421 4884 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="9b93f82f-1fc6-48da-9c2b-12b05ead2f08"
Dec 10 00:34:47 crc kubenswrapper[4884]: I1210 00:34:47.788473 4884 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="9b93f82f-1fc6-48da-9c2b-12b05ead2f08"
Dec 10 00:34:47 crc kubenswrapper[4884]: I1210 00:34:47.793297 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:34:47 crc kubenswrapper[4884]: I1210 00:34:47.796595 4884 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="8947f972-67c8-4978-b645-8c3c0c7199d8"
Dec 10 00:34:48 crc kubenswrapper[4884]: I1210 00:34:48.797820 4884 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="9b93f82f-1fc6-48da-9c2b-12b05ead2f08"
Dec 10 00:34:48 crc kubenswrapper[4884]: I1210 00:34:48.797870 4884 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="9b93f82f-1fc6-48da-9c2b-12b05ead2f08"
Dec 10 00:34:50 crc kubenswrapper[4884]: I1210 00:34:50.087001 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 10 00:34:50 crc kubenswrapper[4884]: I1210 00:34:50.092549 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 10 00:34:53 crc kubenswrapper[4884]: I1210 00:34:53.969624 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Dec 10 00:34:54 crc kubenswrapper[4884]: I1210 00:34:54.308229 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Dec 10 00:34:54 crc kubenswrapper[4884]: I1210 00:34:54.741002 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Dec 10 00:34:55 crc kubenswrapper[4884]: I1210 00:34:55.015589 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Dec 10 00:34:55 crc kubenswrapper[4884]: I1210 00:34:55.593928 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Dec 10 00:34:57 crc kubenswrapper[4884]: I1210 00:34:57.031959 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 10 00:34:57 crc kubenswrapper[4884]: I1210 00:34:57.301415 4884 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="8947f972-67c8-4978-b645-8c3c0c7199d8"
Dec 10 00:34:58 crc kubenswrapper[4884]: I1210 00:34:58.101673 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Dec 10 00:34:58 crc kubenswrapper[4884]: I1210 00:34:58.470687 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Dec 10 00:34:58 crc kubenswrapper[4884]: I1210 00:34:58.606677 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Dec 10 00:34:58 crc kubenswrapper[4884]: I1210 00:34:58.761414 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Dec 10 00:34:58 crc kubenswrapper[4884]: I1210 00:34:58.831695 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Dec 10 00:34:58 crc kubenswrapper[4884]: I1210 00:34:58.900020 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Dec 10 00:34:59 crc kubenswrapper[4884]: I1210 00:34:59.143546 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Dec 10 00:34:59 crc kubenswrapper[4884]: I1210 00:34:59.309037 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Dec 10 00:34:59 crc kubenswrapper[4884]: I1210 00:34:59.345703 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Dec 10 00:34:59 crc kubenswrapper[4884]: I1210 00:34:59.473158 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Dec 10 00:34:59 crc kubenswrapper[4884]: I1210 00:34:59.730200 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Dec 10 00:34:59 crc kubenswrapper[4884]: I1210 00:34:59.812220 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Dec 10 00:34:59 crc kubenswrapper[4884]: I1210 00:34:59.839303 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Dec 10 00:34:59 crc kubenswrapper[4884]: I1210 00:34:59.878717 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Dec 10 00:34:59 crc kubenswrapper[4884]: I1210 00:34:59.920636 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Dec 10 00:35:00 crc kubenswrapper[4884]: I1210 00:35:00.001373 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Dec 10 00:35:00 crc kubenswrapper[4884]: I1210 00:35:00.356758 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Dec 10 00:35:00 crc kubenswrapper[4884]: I1210 00:35:00.396506 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Dec 10 00:35:00 crc kubenswrapper[4884]: I1210 00:35:00.592181 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Dec 10 00:35:00 crc kubenswrapper[4884]: I1210 00:35:00.608290 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Dec 10 00:35:00 crc kubenswrapper[4884]: I1210 00:35:00.609502 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Dec 10 00:35:00 crc kubenswrapper[4884]: I1210 00:35:00.611295 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Dec 10 00:35:00 crc kubenswrapper[4884]: I1210 00:35:00.712193 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Dec 10 00:35:00 crc kubenswrapper[4884]: I1210 00:35:00.796014 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Dec 10 00:35:00 crc kubenswrapper[4884]: I1210 00:35:00.821732 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Dec 10 00:35:00 crc kubenswrapper[4884]: I1210 00:35:00.971485 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Dec 10 00:35:01 crc kubenswrapper[4884]: I1210 00:35:01.134240 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Dec 10 00:35:01 crc kubenswrapper[4884]: I1210 00:35:01.140101 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Dec 10 00:35:01 crc kubenswrapper[4884]: I1210 00:35:01.143864 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Dec 10 00:35:01 crc kubenswrapper[4884]: I1210 00:35:01.457554 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Dec 10 00:35:01 crc kubenswrapper[4884]: I1210 00:35:01.756811 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Dec 10 00:35:01 crc kubenswrapper[4884]: I1210 00:35:01.930113 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Dec 10 00:35:02 crc kubenswrapper[4884]: I1210 00:35:02.032879 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Dec 10 00:35:02 crc kubenswrapper[4884]: I1210 00:35:02.103977 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Dec 10 00:35:02 crc kubenswrapper[4884]: I1210 00:35:02.178984 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Dec 10 00:35:02 crc kubenswrapper[4884]: I1210 00:35:02.231625 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Dec 10 00:35:02 crc kubenswrapper[4884]: I1210 00:35:02.262968 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Dec 10 00:35:02 crc kubenswrapper[4884]: I1210 00:35:02.355558 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Dec 10 00:35:02 crc kubenswrapper[4884]: I1210 00:35:02.435193 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Dec 10 00:35:02 crc kubenswrapper[4884]: I1210 00:35:02.777064 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Dec 10 00:35:02 crc kubenswrapper[4884]: I1210 00:35:02.818234 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Dec 10 00:35:02 crc kubenswrapper[4884]: I1210 00:35:02.837554 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Dec 10 00:35:02 crc kubenswrapper[4884]: I1210 00:35:02.900771 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.228782 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.258557 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.322403 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.390096 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.411163 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.440861 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.684654 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.812510 4884 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.819941 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7qsx6","openshift-kube-apiserver/kube-apiserver-crc"]
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.820040 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-69879bb87d-wllh4"]
Dec 10 00:35:03 crc kubenswrapper[4884]: E1210 00:35:03.820292 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" containerName="registry-server"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.820306 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" containerName="registry-server"
Dec 10 00:35:03 crc kubenswrapper[4884]: E1210 00:35:03.820321 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" containerName="extract-utilities"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.820329 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" containerName="extract-utilities"
Dec 10 00:35:03 crc kubenswrapper[4884]: E1210 00:35:03.820341 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" containerName="installer"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.820349 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" containerName="installer"
Dec 10 00:35:03 crc kubenswrapper[4884]: E1210 00:35:03.820369 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" containerName="extract-content"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.820378 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" containerName="extract-content"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.820479 4884 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="9b93f82f-1fc6-48da-9c2b-12b05ead2f08"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.820505 4884 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="9b93f82f-1fc6-48da-9c2b-12b05ead2f08"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.820522 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="864f39a1-a244-429d-8974-46c6a30aefbb" containerName="registry-server"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.820538 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f04c080-cd3a-44c8-821e-1c9ab9baff78" containerName="installer"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.821074 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.823780 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.824042 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.824285 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.827401 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.827648 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.827928 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.828427 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.828523 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.828614 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.828939 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.829144 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.830860 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.834129 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.838479 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.839657 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.839747 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.853255 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.853671 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=16.853643831 podStartE2EDuration="16.853643831s" podCreationTimestamp="2025-12-10 00:34:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:35:03.849662151 +0000 UTC m=+276.927619278" watchObservedRunningTime="2025-12-10 00:35:03.853643831 +0000 UTC m=+276.931600968"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886023 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwmlm\" (UniqueName: \"kubernetes.io/projected/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-kube-api-access-hwmlm\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886097 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-serving-cert\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886135 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-user-template-login\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886170 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-audit-policies\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886385 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-service-ca\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886514 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886582 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886629 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-cliconfig\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886703 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-user-template-error\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886751 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886822 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-session\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886873 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-audit-dir\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.886946 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-router-certs\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.887017 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989124 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwmlm\" (UniqueName: \"kubernetes.io/projected/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-kube-api-access-hwmlm\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989242 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-serving-cert\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989298 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-user-template-login\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989374 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-audit-policies\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989512 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-service-ca\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989576 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989646 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989707 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-cliconfig\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989747 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-user-template-error\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989782 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989829 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-session\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989861 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-audit-dir\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989902 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-router-certs\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.989942 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.991666 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-audit-dir\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.992055 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-service-ca\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4"
Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.992557 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume
\"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.993563 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-audit-policies\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.998011 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-user-template-error\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.998418 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-router-certs\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.998638 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.998961 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-serving-cert\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.999142 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-user-template-login\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:03 crc kubenswrapper[4884]: I1210 00:35:03.999458 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-session\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.001917 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.006833 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-cliconfig\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.008783 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.012686 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.021731 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwmlm\" (UniqueName: \"kubernetes.io/projected/cceb76bb-1ba2-436e-8065-f94d1ccf19f5-kube-api-access-hwmlm\") pod \"oauth-openshift-69879bb87d-wllh4\" (UID: \"cceb76bb-1ba2-436e-8065-f94d1ccf19f5\") " pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.041050 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.050511 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.144340 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.161256 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.163966 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.186719 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.232019 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.254807 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.359200 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.458970 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.479601 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.734749 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.797143 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.822571 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.896489 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 10 00:35:04 crc kubenswrapper[4884]: I1210 00:35:04.971467 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.039391 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.045217 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.089420 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.174192 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.292988 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.300076 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="864f39a1-a244-429d-8974-46c6a30aefbb" path="/var/lib/kubelet/pods/864f39a1-a244-429d-8974-46c6a30aefbb/volumes" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.352186 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.358406 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.475397 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.491084 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.530041 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.542962 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.565222 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.822597 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.943980 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.951859 4884 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.969922 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.971852 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 10 00:35:05 crc kubenswrapper[4884]: I1210 00:35:05.983845 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.046510 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.068073 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.085762 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.116672 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.332213 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.373034 4884 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.379359 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.450511 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.464830 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.516837 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.530806 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.577429 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.596810 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.643582 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.684244 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.820836 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.953144 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.974728 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Dec 10 00:35:06 crc kubenswrapper[4884]: I1210 00:35:06.995090 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.100287 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.168991 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-69879bb87d-wllh4"] Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.258207 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.275654 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.348798 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.375470 4884 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.512187 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.610637 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.642666 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.652185 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.664665 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.668421 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.758533 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.785161 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.826889 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.868267 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.925776 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.935087 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-69879bb87d-wllh4_cceb76bb-1ba2-436e-8065-f94d1ccf19f5/oauth-openshift/0.log" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.935126 4884 generic.go:334] "Generic (PLEG): container finished" podID="cceb76bb-1ba2-436e-8065-f94d1ccf19f5" containerID="e8e1b8787d7163f4793f07ade4b03a41d0c51ac6a8b11ad7dd49df6a7f6a233b" exitCode=255 Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.935151 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" event={"ID":"cceb76bb-1ba2-436e-8065-f94d1ccf19f5","Type":"ContainerDied","Data":"e8e1b8787d7163f4793f07ade4b03a41d0c51ac6a8b11ad7dd49df6a7f6a233b"} Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.935176 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" event={"ID":"cceb76bb-1ba2-436e-8065-f94d1ccf19f5","Type":"ContainerStarted","Data":"f26239891ea814e8dfc8812eaf2c1f8f4f22a9da7ced24be8c1b75145a6421f6"} Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.935502 4884 scope.go:117] "RemoveContainer" containerID="e8e1b8787d7163f4793f07ade4b03a41d0c51ac6a8b11ad7dd49df6a7f6a233b" Dec 10 00:35:07 crc kubenswrapper[4884]: I1210 00:35:07.942725 
4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.001307 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.183291 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.357765 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.366915 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.395493 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.395922 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.446632 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.520593 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.566117 4884 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.569315 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.743504 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.777730 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.875208 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.931557 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.945829 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-69879bb87d-wllh4_cceb76bb-1ba2-436e-8065-f94d1ccf19f5/oauth-openshift/0.log" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.945948 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" event={"ID":"cceb76bb-1ba2-436e-8065-f94d1ccf19f5","Type":"ContainerStarted","Data":"c070e9bb6fbc6a9ff9c352996b8951b87aa2717e5355166398cc1f7a0a765ec6"} Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.946788 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.956167 4884 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" Dec 10 00:35:08 crc kubenswrapper[4884]: I1210 00:35:08.986652 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-69879bb87d-wllh4" podStartSLOduration=66.986620571 podStartE2EDuration="1m6.986620571s" podCreationTimestamp="2025-12-10 00:34:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:35:08.979797011 +0000 UTC m=+282.057754148" watchObservedRunningTime="2025-12-10 00:35:08.986620571 +0000 UTC m=+282.064577728" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.027199 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.128895 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.129081 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.225113 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.268873 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.414688 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.416958 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.442122 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.554978 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.590926 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.655469 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.682150 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.777768 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.807834 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 10 00:35:09 crc kubenswrapper[4884]: I1210 00:35:09.942390 4884 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-network-console"/"networking-console-plugin" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.027481 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.123333 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.162065 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.218182 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.320273 4884 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.320572 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://c294d99ee418a00e3f071f3f078a5c0e3eed0b58b2dac452bc97a4e4bc6c9659" gracePeriod=5 Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.331589 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.336374 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.442518 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.442710 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.468923 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.491279 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.499244 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.530581 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.677815 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.734382 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.759194 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.776127 4884 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.787775 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.800823 4884 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Dec 10 00:35:10 crc kubenswrapper[4884]: I1210 00:35:10.980634 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.138775 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.148358 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.277040 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.277280 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.291909 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.292690 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.308691 4884 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.311105 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.369585 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.498306 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.508592 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.613756 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.623421 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.686535 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.817635 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.849782 4884 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-controller-manager"/"serving-cert" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.877288 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 10 00:35:11 crc kubenswrapper[4884]: I1210 00:35:11.908867 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.014871 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.021726 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.026730 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.204970 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.236906 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.343014 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.397613 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.473173 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.629862 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.669274 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.719021 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.798353 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 10 00:35:12 crc kubenswrapper[4884]: I1210 00:35:12.896284 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.048862 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.086280 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.124015 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.173234 4884 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.419347 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.437321 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.455242 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.467463 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.571130 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.772605 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.884601 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.966119 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 10 00:35:13 crc kubenswrapper[4884]: I1210 00:35:13.997425 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 10 00:35:14 crc kubenswrapper[4884]: I1210 00:35:14.076969 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 10 00:35:14 crc kubenswrapper[4884]: I1210 00:35:14.156720 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 10 00:35:14 crc kubenswrapper[4884]: I1210 00:35:14.160308 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Dec 10 00:35:14 crc kubenswrapper[4884]: I1210 00:35:14.172090 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 10 00:35:14 crc kubenswrapper[4884]: I1210 00:35:14.356297 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 10 00:35:14 crc kubenswrapper[4884]: I1210 00:35:14.430121 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 10 00:35:14 crc kubenswrapper[4884]: I1210 00:35:14.459103 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 10 00:35:14 crc kubenswrapper[4884]: I1210 00:35:14.485159 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 10 00:35:14 crc kubenswrapper[4884]: I1210 00:35:14.811896 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 10 00:35:14 crc kubenswrapper[4884]: 
I1210 00:35:14.966034 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.189659 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.293043 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.439805 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.439887 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.492562 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.540180 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.606256 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.606384 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.606417 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.606488 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.606539 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.606583 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.606645 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.606733 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.606888 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.607284 4884 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.607329 4884 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.607352 4884 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.607417 4884 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.620025 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.708789 4884 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.732377 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.819096 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.948119 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.995470 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.995594 4884 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="c294d99ee418a00e3f071f3f078a5c0e3eed0b58b2dac452bc97a4e4bc6c9659" exitCode=137 Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.995674 4884 scope.go:117] "RemoveContainer" containerID="c294d99ee418a00e3f071f3f078a5c0e3eed0b58b2dac452bc97a4e4bc6c9659" Dec 10 00:35:15 crc kubenswrapper[4884]: I1210 00:35:15.995749 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 10 00:35:16 crc kubenswrapper[4884]: I1210 00:35:16.024857 4884 scope.go:117] "RemoveContainer" containerID="c294d99ee418a00e3f071f3f078a5c0e3eed0b58b2dac452bc97a4e4bc6c9659" Dec 10 00:35:16 crc kubenswrapper[4884]: E1210 00:35:16.025501 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c294d99ee418a00e3f071f3f078a5c0e3eed0b58b2dac452bc97a4e4bc6c9659\": container with ID starting with c294d99ee418a00e3f071f3f078a5c0e3eed0b58b2dac452bc97a4e4bc6c9659 not found: ID does not exist" containerID="c294d99ee418a00e3f071f3f078a5c0e3eed0b58b2dac452bc97a4e4bc6c9659" Dec 10 00:35:16 crc kubenswrapper[4884]: I1210 00:35:16.025584 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c294d99ee418a00e3f071f3f078a5c0e3eed0b58b2dac452bc97a4e4bc6c9659"} err="failed to get container status \"c294d99ee418a00e3f071f3f078a5c0e3eed0b58b2dac452bc97a4e4bc6c9659\": rpc error: code = NotFound desc = could not find container \"c294d99ee418a00e3f071f3f078a5c0e3eed0b58b2dac452bc97a4e4bc6c9659\": container with ID starting with c294d99ee418a00e3f071f3f078a5c0e3eed0b58b2dac452bc97a4e4bc6c9659 not found: ID does not exist" Dec 10 00:35:16 crc kubenswrapper[4884]: I1210 00:35:16.412657 4884 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 10 00:35:17 crc kubenswrapper[4884]: I1210 00:35:17.301106 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Dec 10 00:35:27 crc kubenswrapper[4884]: I1210 00:35:27.068632 4884 cert_rotation.go:91] certificate rotation detected, shutting 
down client connections to start using new credentials Dec 10 00:35:32 crc kubenswrapper[4884]: I1210 00:35:32.106578 4884 generic.go:334] "Generic (PLEG): container finished" podID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" containerID="942d31a2ebc9e789159947fa3eccd4cd52e94c6531274973588c05e45ee61931" exitCode=0 Dec 10 00:35:32 crc kubenswrapper[4884]: I1210 00:35:32.106685 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" event={"ID":"db8164d6-52ea-4ee2-b307-0acc3cbd72a9","Type":"ContainerDied","Data":"942d31a2ebc9e789159947fa3eccd4cd52e94c6531274973588c05e45ee61931"} Dec 10 00:35:32 crc kubenswrapper[4884]: I1210 00:35:32.109501 4884 scope.go:117] "RemoveContainer" containerID="942d31a2ebc9e789159947fa3eccd4cd52e94c6531274973588c05e45ee61931" Dec 10 00:35:33 crc kubenswrapper[4884]: I1210 00:35:33.118968 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" event={"ID":"db8164d6-52ea-4ee2-b307-0acc3cbd72a9","Type":"ContainerStarted","Data":"d9028f908c919244a2fdaa126a47128c6d84f8430265221b40d121d5c62d61e9"} Dec 10 00:35:33 crc kubenswrapper[4884]: I1210 00:35:33.120112 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" Dec 10 00:35:33 crc kubenswrapper[4884]: I1210 00:35:33.121933 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" Dec 10 00:35:38 crc kubenswrapper[4884]: I1210 00:35:38.896944 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-rqkv4"] Dec 10 00:35:38 crc kubenswrapper[4884]: I1210 00:35:38.997966 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"] Dec 10 00:35:38 crc kubenswrapper[4884]: I1210 00:35:38.998170 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" podUID="af455a3c-331c-44ed-9619-1f66379fd774" containerName="route-controller-manager" containerID="cri-o://65cdd817d329b317d7782fb14fd992a4e47fbbfb23a2e8d41848149e6587c017" gracePeriod=30 Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.164354 4884 generic.go:334] "Generic (PLEG): container finished" podID="af455a3c-331c-44ed-9619-1f66379fd774" containerID="65cdd817d329b317d7782fb14fd992a4e47fbbfb23a2e8d41848149e6587c017" exitCode=0 Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.164599 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" podUID="415bb537-8c4e-4114-b627-6c76d7cb6738" containerName="controller-manager" containerID="cri-o://f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1" gracePeriod=30 Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.164732 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" event={"ID":"af455a3c-331c-44ed-9619-1f66379fd774","Type":"ContainerDied","Data":"65cdd817d329b317d7782fb14fd992a4e47fbbfb23a2e8d41848149e6587c017"} Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.433560 4884 util.go:48] "No ready sandbox for pod can be found. 
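The "Killing container with a grace period ... gracePeriod=30" entries above, followed by ContainerDied events, show the standard two-stage termination: ask the container to exit, and escalate only if the grace period lapses first. A minimal Go sketch of that pattern (illustrative only; stop and forceKill are hypothetical callbacks, not kubelet's or CRI-O's actual API):

package main

import (
	"fmt"
	"time"
)

// killWithGracePeriod mirrors the two-stage termination above: request a
// polite stop, then force-kill only if the grace period expires first.
func killWithGracePeriod(stop func() <-chan struct{}, forceKill func(), grace time.Duration) {
	exited := stop() // SIGTERM-equivalent; channel closes when the container exits
	select {
	case <-exited:
		fmt.Println("container exited within the grace period")
	case <-time.After(grace):
		fmt.Println("grace period elapsed; force-killing")
		forceKill()
	}
}

func main() {
	done := make(chan struct{})
	stop := func() <-chan struct{} {
		go func() { time.Sleep(100 * time.Millisecond); close(done) }()
		return done
	}
	killWithGracePeriod(stop, func() {}, 30*time.Second)
}

In the log both pods exit with exitCode=0 well inside the 30-second window, so the force-kill path is never taken.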
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.602020 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af455a3c-331c-44ed-9619-1f66379fd774-serving-cert\") pod \"af455a3c-331c-44ed-9619-1f66379fd774\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.602129 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af455a3c-331c-44ed-9619-1f66379fd774-config\") pod \"af455a3c-331c-44ed-9619-1f66379fd774\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.602189 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af455a3c-331c-44ed-9619-1f66379fd774-client-ca\") pod \"af455a3c-331c-44ed-9619-1f66379fd774\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.602232 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mswlq\" (UniqueName: \"kubernetes.io/projected/af455a3c-331c-44ed-9619-1f66379fd774-kube-api-access-mswlq\") pod \"af455a3c-331c-44ed-9619-1f66379fd774\" (UID: \"af455a3c-331c-44ed-9619-1f66379fd774\") " Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.603402 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af455a3c-331c-44ed-9619-1f66379fd774-client-ca" (OuterVolumeSpecName: "client-ca") pod "af455a3c-331c-44ed-9619-1f66379fd774" (UID: "af455a3c-331c-44ed-9619-1f66379fd774"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.603510 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af455a3c-331c-44ed-9619-1f66379fd774-config" (OuterVolumeSpecName: "config") pod "af455a3c-331c-44ed-9619-1f66379fd774" (UID: "af455a3c-331c-44ed-9619-1f66379fd774"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.607757 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af455a3c-331c-44ed-9619-1f66379fd774-kube-api-access-mswlq" (OuterVolumeSpecName: "kube-api-access-mswlq") pod "af455a3c-331c-44ed-9619-1f66379fd774" (UID: "af455a3c-331c-44ed-9619-1f66379fd774"). InnerVolumeSpecName "kube-api-access-mswlq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.608329 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af455a3c-331c-44ed-9619-1f66379fd774-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "af455a3c-331c-44ed-9619-1f66379fd774" (UID: "af455a3c-331c-44ed-9619-1f66379fd774"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.703874 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af455a3c-331c-44ed-9619-1f66379fd774-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.703913 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/af455a3c-331c-44ed-9619-1f66379fd774-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.703924 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mswlq\" (UniqueName: \"kubernetes.io/projected/af455a3c-331c-44ed-9619-1f66379fd774-kube-api-access-mswlq\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:39 crc kubenswrapper[4884]: I1210 00:35:39.703949 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/af455a3c-331c-44ed-9619-1f66379fd774-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.061555 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.208111 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" event={"ID":"af455a3c-331c-44ed-9619-1f66379fd774","Type":"ContainerDied","Data":"6977bb5063bfad7516359a26ac2c538adabea7e56aa8c75386b0d0935b7f4825"} Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.208407 4884 scope.go:117] "RemoveContainer" containerID="65cdd817d329b317d7782fb14fd992a4e47fbbfb23a2e8d41848149e6587c017" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.208329 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.209633 4884 generic.go:334] "Generic (PLEG): container finished" podID="415bb537-8c4e-4114-b627-6c76d7cb6738" containerID="f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1" exitCode=0 Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.209682 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" event={"ID":"415bb537-8c4e-4114-b627-6c76d7cb6738","Type":"ContainerDied","Data":"f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1"} Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.209708 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" event={"ID":"415bb537-8c4e-4114-b627-6c76d7cb6738","Type":"ContainerDied","Data":"5afbb919a42989cfd1c7efa7684c523d41bbf8e168028e833292fedf03affdb6"} Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.209741 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-rqkv4" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.231085 4884 scope.go:117] "RemoveContainer" containerID="f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.235064 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"] Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.240112 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-p6jkw"] Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.244426 4884 scope.go:117] "RemoveContainer" containerID="f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1" Dec 10 00:35:40 crc kubenswrapper[4884]: E1210 00:35:40.244828 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1\": container with ID starting with f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1 not found: ID does not exist" containerID="f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.244862 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1"} err="failed to get container status \"f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1\": rpc error: code = NotFound desc = could not find container \"f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1\": container with ID starting with f06a7b6ef8705b59ad05b8a9cee5578f6df5ec16157d7ec9f57672ef8621b4d1 not found: ID does not exist" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.298568 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szbbd\" (UniqueName: \"kubernetes.io/projected/415bb537-8c4e-4114-b627-6c76d7cb6738-kube-api-access-szbbd\") pod \"415bb537-8c4e-4114-b627-6c76d7cb6738\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.298906 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-config\") pod \"415bb537-8c4e-4114-b627-6c76d7cb6738\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.298994 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/415bb537-8c4e-4114-b627-6c76d7cb6738-serving-cert\") pod \"415bb537-8c4e-4114-b627-6c76d7cb6738\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.299043 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-client-ca\") pod \"415bb537-8c4e-4114-b627-6c76d7cb6738\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.299069 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-proxy-ca-bundles\") pod \"415bb537-8c4e-4114-b627-6c76d7cb6738\" (UID: \"415bb537-8c4e-4114-b627-6c76d7cb6738\") " Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.299696 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-config" (OuterVolumeSpecName: "config") pod "415bb537-8c4e-4114-b627-6c76d7cb6738" (UID: "415bb537-8c4e-4114-b627-6c76d7cb6738"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.299901 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-client-ca" (OuterVolumeSpecName: "client-ca") pod "415bb537-8c4e-4114-b627-6c76d7cb6738" (UID: "415bb537-8c4e-4114-b627-6c76d7cb6738"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.299952 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "415bb537-8c4e-4114-b627-6c76d7cb6738" (UID: "415bb537-8c4e-4114-b627-6c76d7cb6738"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.304233 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/415bb537-8c4e-4114-b627-6c76d7cb6738-kube-api-access-szbbd" (OuterVolumeSpecName: "kube-api-access-szbbd") pod "415bb537-8c4e-4114-b627-6c76d7cb6738" (UID: "415bb537-8c4e-4114-b627-6c76d7cb6738"). InnerVolumeSpecName "kube-api-access-szbbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.304237 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/415bb537-8c4e-4114-b627-6c76d7cb6738-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "415bb537-8c4e-4114-b627-6c76d7cb6738" (UID: "415bb537-8c4e-4114-b627-6c76d7cb6738"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.400376 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szbbd\" (UniqueName: \"kubernetes.io/projected/415bb537-8c4e-4114-b627-6c76d7cb6738-kube-api-access-szbbd\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.400426 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.400459 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/415bb537-8c4e-4114-b627-6c76d7cb6738-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.400472 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.400483 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/415bb537-8c4e-4114-b627-6c76d7cb6738-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.560327 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-rqkv4"] Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.566884 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-rqkv4"] Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.760266 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"] Dec 10 00:35:40 crc kubenswrapper[4884]: E1210 00:35:40.760783 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="415bb537-8c4e-4114-b627-6c76d7cb6738" containerName="controller-manager" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.760840 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="415bb537-8c4e-4114-b627-6c76d7cb6738" containerName="controller-manager" Dec 10 00:35:40 crc kubenswrapper[4884]: E1210 00:35:40.760862 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.760882 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 10 00:35:40 crc kubenswrapper[4884]: E1210 00:35:40.760916 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af455a3c-331c-44ed-9619-1f66379fd774" containerName="route-controller-manager" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.760936 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="af455a3c-331c-44ed-9619-1f66379fd774" containerName="route-controller-manager" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.761235 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="af455a3c-331c-44ed-9619-1f66379fd774" containerName="route-controller-manager" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.761276 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" 
containerName="startup-monitor" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.761299 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="415bb537-8c4e-4114-b627-6c76d7cb6738" containerName="controller-manager" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.762190 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.768420 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.768621 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.769025 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.769104 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5b5446fddb-5t46n"] Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.769427 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.769851 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.770686 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.772258 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.777287 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"] Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.780332 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.780965 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.781342 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.781992 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.782277 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.783426 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.798134 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 
00:35:40.804485 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b5446fddb-5t46n"] Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.898503 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5b5446fddb-5t46n"] Dec 10 00:35:40 crc kubenswrapper[4884]: E1210 00:35:40.898999 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[client-ca config kube-api-access-69kt9 proxy-ca-bundles serving-cert], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n" podUID="cd13d21f-0c44-43f9-bd29-df451fc7905a" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.906056 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"] Dec 10 00:35:40 crc kubenswrapper[4884]: E1210 00:35:40.906697 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[client-ca config kube-api-access-8htff serving-cert], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4" podUID="fc72c2dd-0633-4b38-913a-a1055d9b1dd8" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.908033 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-serving-cert\") pod \"route-controller-manager-74dcd8b68b-jcnf4\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.908130 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-config\") pod \"route-controller-manager-74dcd8b68b-jcnf4\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.908177 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-config\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.908216 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-client-ca\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n" Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.908240 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-client-ca\") pod \"route-controller-manager-74dcd8b68b-jcnf4\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4" Dec 10 00:35:40 crc 
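Each journal line here has the same shape: a syslog prefix (date, host, unit[pid]) followed by a klog header (severity letter plus MMDD, wall time, PID, file:line) and the message. A best-effort Go parser for this capture (the regex is tuned to exactly these lines, not a general-purpose klog parser):

package main

import (
	"fmt"
	"regexp"
)

// klogLine matches: syslog prefix + klog header + message, as captured above.
var klogLine = regexp.MustCompile(
	`^(\w{3} +\d+ [\d:]+) (\S+) kubenswrapper\[(\d+)\]: ([IWEF])(\d{4}) ([\d:.]+)\s+\d+ (\S+?):(\d+)\] (.*)$`)

func main() {
	line := `Dec 10 00:35:40 crc kubenswrapper[4884]: E1210 00:35:40.898999 4884 pod_workers.go:1301] "Error syncing pod, skipping" pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"`
	if m := klogLine.FindStringSubmatch(line); m != nil {
		// m[4]=severity, m[7]:m[8]=source location, m[9]=structured message
		fmt.Printf("severity=%s source=%s:%s msg=%s\n", m[4], m[7], m[8], m[9])
	}
}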
Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.908303 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cd13d21f-0c44-43f9-bd29-df451fc7905a-serving-cert\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.908330 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8htff\" (UniqueName: \"kubernetes.io/projected/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-kube-api-access-8htff\") pod \"route-controller-manager-74dcd8b68b-jcnf4\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"
Dec 10 00:35:40 crc kubenswrapper[4884]: I1210 00:35:40.908374 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-proxy-ca-bundles\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.010328 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-config\") pod \"route-controller-manager-74dcd8b68b-jcnf4\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.010384 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-config\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.010402 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-client-ca\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.010420 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-client-ca\") pod \"route-controller-manager-74dcd8b68b-jcnf4\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.010459 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69kt9\" (UniqueName: \"kubernetes.io/projected/cd13d21f-0c44-43f9-bd29-df451fc7905a-kube-api-access-69kt9\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.010484 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cd13d21f-0c44-43f9-bd29-df451fc7905a-serving-cert\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.010507 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8htff\" (UniqueName: \"kubernetes.io/projected/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-kube-api-access-8htff\") pod \"route-controller-manager-74dcd8b68b-jcnf4\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.010536 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-proxy-ca-bundles\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.010556 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-serving-cert\") pod \"route-controller-manager-74dcd8b68b-jcnf4\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.013114 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-config\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.013699 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-client-ca\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.014013 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-config\") pod \"route-controller-manager-74dcd8b68b-jcnf4\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.014241 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-client-ca\") pod \"route-controller-manager-74dcd8b68b-jcnf4\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.015237 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-serving-cert\") pod \"route-controller-manager-74dcd8b68b-jcnf4\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.016193 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-proxy-ca-bundles\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.020426 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cd13d21f-0c44-43f9-bd29-df451fc7905a-serving-cert\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.035226 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8htff\" (UniqueName: \"kubernetes.io/projected/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-kube-api-access-8htff\") pod \"route-controller-manager-74dcd8b68b-jcnf4\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.036111 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69kt9\" (UniqueName: \"kubernetes.io/projected/cd13d21f-0c44-43f9-bd29-df451fc7905a-kube-api-access-69kt9\") pod \"controller-manager-5b5446fddb-5t46n\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.220821 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.220821 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.234470 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"
Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.240921 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n"
Need to start a new one" pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.297717 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="415bb537-8c4e-4114-b627-6c76d7cb6738" path="/var/lib/kubelet/pods/415bb537-8c4e-4114-b627-6c76d7cb6738/volumes" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.298785 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af455a3c-331c-44ed-9619-1f66379fd774" path="/var/lib/kubelet/pods/af455a3c-331c-44ed-9619-1f66379fd774/volumes" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.417588 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cd13d21f-0c44-43f9-bd29-df451fc7905a-serving-cert\") pod \"cd13d21f-0c44-43f9-bd29-df451fc7905a\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.418094 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-config\") pod \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.418139 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-serving-cert\") pod \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.418204 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-client-ca\") pod \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.418235 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-proxy-ca-bundles\") pod \"cd13d21f-0c44-43f9-bd29-df451fc7905a\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.418358 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8htff\" (UniqueName: \"kubernetes.io/projected/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-kube-api-access-8htff\") pod \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\" (UID: \"fc72c2dd-0633-4b38-913a-a1055d9b1dd8\") " Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.419128 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-config" (OuterVolumeSpecName: "config") pod "fc72c2dd-0633-4b38-913a-a1055d9b1dd8" (UID: "fc72c2dd-0633-4b38-913a-a1055d9b1dd8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.419170 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-config\") pod \"cd13d21f-0c44-43f9-bd29-df451fc7905a\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.419221 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-client-ca" (OuterVolumeSpecName: "client-ca") pod "fc72c2dd-0633-4b38-913a-a1055d9b1dd8" (UID: "fc72c2dd-0633-4b38-913a-a1055d9b1dd8"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.419222 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "cd13d21f-0c44-43f9-bd29-df451fc7905a" (UID: "cd13d21f-0c44-43f9-bd29-df451fc7905a"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.419251 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-client-ca\") pod \"cd13d21f-0c44-43f9-bd29-df451fc7905a\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.419379 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69kt9\" (UniqueName: \"kubernetes.io/projected/cd13d21f-0c44-43f9-bd29-df451fc7905a-kube-api-access-69kt9\") pod \"cd13d21f-0c44-43f9-bd29-df451fc7905a\" (UID: \"cd13d21f-0c44-43f9-bd29-df451fc7905a\") " Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.419960 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-client-ca" (OuterVolumeSpecName: "client-ca") pod "cd13d21f-0c44-43f9-bd29-df451fc7905a" (UID: "cd13d21f-0c44-43f9-bd29-df451fc7905a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.419974 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.420048 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.420069 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.420176 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-config" (OuterVolumeSpecName: "config") pod "cd13d21f-0c44-43f9-bd29-df451fc7905a" (UID: "cd13d21f-0c44-43f9-bd29-df451fc7905a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.422752 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd13d21f-0c44-43f9-bd29-df451fc7905a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "cd13d21f-0c44-43f9-bd29-df451fc7905a" (UID: "cd13d21f-0c44-43f9-bd29-df451fc7905a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.423956 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd13d21f-0c44-43f9-bd29-df451fc7905a-kube-api-access-69kt9" (OuterVolumeSpecName: "kube-api-access-69kt9") pod "cd13d21f-0c44-43f9-bd29-df451fc7905a" (UID: "cd13d21f-0c44-43f9-bd29-df451fc7905a"). InnerVolumeSpecName "kube-api-access-69kt9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.424064 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "fc72c2dd-0633-4b38-913a-a1055d9b1dd8" (UID: "fc72c2dd-0633-4b38-913a-a1055d9b1dd8"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.433365 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-kube-api-access-8htff" (OuterVolumeSpecName: "kube-api-access-8htff") pod "fc72c2dd-0633-4b38-913a-a1055d9b1dd8" (UID: "fc72c2dd-0633-4b38-913a-a1055d9b1dd8"). InnerVolumeSpecName "kube-api-access-8htff". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.521884 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8htff\" (UniqueName: \"kubernetes.io/projected/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-kube-api-access-8htff\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.521933 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.521946 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cd13d21f-0c44-43f9-bd29-df451fc7905a-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.521960 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69kt9\" (UniqueName: \"kubernetes.io/projected/cd13d21f-0c44-43f9-bd29-df451fc7905a-kube-api-access-69kt9\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.521972 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cd13d21f-0c44-43f9-bd29-df451fc7905a-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:41 crc kubenswrapper[4884]: I1210 00:35:41.521984 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fc72c2dd-0633-4b38-913a-a1055d9b1dd8-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.226777 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5b5446fddb-5t46n" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.226837 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.285386 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6488c7567-wqrht"] Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.286592 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.291167 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.291660 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.291763 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5b5446fddb-5t46n"] Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.292340 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.292618 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.292856 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.293012 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5b5446fddb-5t46n"] Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.295713 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6488c7567-wqrht"] Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.298543 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.306927 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.319582 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"] Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.322253 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dcd8b68b-jcnf4"] Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.435289 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-client-ca\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.436056 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-v7l8f\" (UniqueName: \"kubernetes.io/projected/a10d8a72-81da-4077-b18f-ba448b4ddbf5-kube-api-access-v7l8f\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.436399 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-proxy-ca-bundles\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.436692 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-config\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.436934 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a10d8a72-81da-4077-b18f-ba448b4ddbf5-serving-cert\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.541187 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-config\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.538761 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-config\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.541364 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a10d8a72-81da-4077-b18f-ba448b4ddbf5-serving-cert\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.542609 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-client-ca\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.542667 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7l8f\" (UniqueName: \"kubernetes.io/projected/a10d8a72-81da-4077-b18f-ba448b4ddbf5-kube-api-access-v7l8f\") pod \"controller-manager-6488c7567-wqrht\" (UID: 
\"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.542722 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-proxy-ca-bundles\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.544553 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-proxy-ca-bundles\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.546085 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-client-ca\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.547519 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a10d8a72-81da-4077-b18f-ba448b4ddbf5-serving-cert\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.571707 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7l8f\" (UniqueName: \"kubernetes.io/projected/a10d8a72-81da-4077-b18f-ba448b4ddbf5-kube-api-access-v7l8f\") pod \"controller-manager-6488c7567-wqrht\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.612860 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:42 crc kubenswrapper[4884]: I1210 00:35:42.910870 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6488c7567-wqrht"] Dec 10 00:35:43 crc kubenswrapper[4884]: I1210 00:35:43.236934 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" event={"ID":"a10d8a72-81da-4077-b18f-ba448b4ddbf5","Type":"ContainerStarted","Data":"559e9b0aeab72cd71d4c2bdf9f96fb31fb35905f91fc95c280a8de38555808c8"} Dec 10 00:35:43 crc kubenswrapper[4884]: I1210 00:35:43.237013 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" event={"ID":"a10d8a72-81da-4077-b18f-ba448b4ddbf5","Type":"ContainerStarted","Data":"c5e970e4ac08fed03cf8df0052b8a0a3015f84ecacae3fef19aa5a829ca500aa"} Dec 10 00:35:43 crc kubenswrapper[4884]: I1210 00:35:43.239123 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:43 crc kubenswrapper[4884]: I1210 00:35:43.246345 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:43 crc kubenswrapper[4884]: I1210 00:35:43.256491 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" podStartSLOduration=3.256473346 podStartE2EDuration="3.256473346s" podCreationTimestamp="2025-12-10 00:35:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:35:43.254518481 +0000 UTC m=+316.332475698" watchObservedRunningTime="2025-12-10 00:35:43.256473346 +0000 UTC m=+316.334430463" Dec 10 00:35:43 crc kubenswrapper[4884]: I1210 00:35:43.294510 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd13d21f-0c44-43f9-bd29-df451fc7905a" path="/var/lib/kubelet/pods/cd13d21f-0c44-43f9-bd29-df451fc7905a/volumes" Dec 10 00:35:43 crc kubenswrapper[4884]: I1210 00:35:43.295169 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc72c2dd-0633-4b38-913a-a1055d9b1dd8" path="/var/lib/kubelet/pods/fc72c2dd-0633-4b38-913a-a1055d9b1dd8/volumes" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.756106 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc"] Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.761668 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.765333 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.765918 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.766404 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.766654 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.766741 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.771776 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.774211 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ca1d2c87-d80a-4f09-a165-230f87d54641-client-ca\") pod \"route-controller-manager-d5775f87f-8d7mc\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.774265 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2v8x\" (UniqueName: \"kubernetes.io/projected/ca1d2c87-d80a-4f09-a165-230f87d54641-kube-api-access-h2v8x\") pod \"route-controller-manager-d5775f87f-8d7mc\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.774299 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca1d2c87-d80a-4f09-a165-230f87d54641-serving-cert\") pod \"route-controller-manager-d5775f87f-8d7mc\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.774337 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca1d2c87-d80a-4f09-a165-230f87d54641-config\") pod \"route-controller-manager-d5775f87f-8d7mc\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.786394 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc"] Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.876056 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ca1d2c87-d80a-4f09-a165-230f87d54641-client-ca\") pod \"route-controller-manager-d5775f87f-8d7mc\" 
(UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.876126 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2v8x\" (UniqueName: \"kubernetes.io/projected/ca1d2c87-d80a-4f09-a165-230f87d54641-kube-api-access-h2v8x\") pod \"route-controller-manager-d5775f87f-8d7mc\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.876166 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca1d2c87-d80a-4f09-a165-230f87d54641-serving-cert\") pod \"route-controller-manager-d5775f87f-8d7mc\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.876208 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca1d2c87-d80a-4f09-a165-230f87d54641-config\") pod \"route-controller-manager-d5775f87f-8d7mc\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.877223 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ca1d2c87-d80a-4f09-a165-230f87d54641-client-ca\") pod \"route-controller-manager-d5775f87f-8d7mc\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.877551 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca1d2c87-d80a-4f09-a165-230f87d54641-config\") pod \"route-controller-manager-d5775f87f-8d7mc\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.889085 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca1d2c87-d80a-4f09-a165-230f87d54641-serving-cert\") pod \"route-controller-manager-d5775f87f-8d7mc\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:44 crc kubenswrapper[4884]: I1210 00:35:44.910056 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2v8x\" (UniqueName: \"kubernetes.io/projected/ca1d2c87-d80a-4f09-a165-230f87d54641-kube-api-access-h2v8x\") pod \"route-controller-manager-d5775f87f-8d7mc\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:45 crc kubenswrapper[4884]: I1210 00:35:45.097748 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:45 crc kubenswrapper[4884]: I1210 00:35:45.375728 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc"] Dec 10 00:35:46 crc kubenswrapper[4884]: I1210 00:35:46.261803 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" event={"ID":"ca1d2c87-d80a-4f09-a165-230f87d54641","Type":"ContainerStarted","Data":"5635e950e439ba213a5f5648488fbcdd3255cd7460861fc5d97399d71149322d"} Dec 10 00:35:46 crc kubenswrapper[4884]: I1210 00:35:46.262257 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" event={"ID":"ca1d2c87-d80a-4f09-a165-230f87d54641","Type":"ContainerStarted","Data":"81edf91b430224f4f5c5e124848a2a217a3134646336a4e2c40bc489c3de7c62"} Dec 10 00:35:46 crc kubenswrapper[4884]: I1210 00:35:46.264635 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:46 crc kubenswrapper[4884]: I1210 00:35:46.277300 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:46 crc kubenswrapper[4884]: I1210 00:35:46.320894 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" podStartSLOduration=6.320866692 podStartE2EDuration="6.320866692s" podCreationTimestamp="2025-12-10 00:35:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:35:46.285642202 +0000 UTC m=+319.363599379" watchObservedRunningTime="2025-12-10 00:35:46.320866692 +0000 UTC m=+319.398823849" Dec 10 00:35:55 crc kubenswrapper[4884]: I1210 00:35:55.905931 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rdvc9"] Dec 10 00:35:55 crc kubenswrapper[4884]: I1210 00:35:55.907000 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rdvc9" podUID="0e6acb02-4f9e-4771-9213-955aa68cfa43" containerName="registry-server" containerID="cri-o://e6d3088d82f1063697f86d936085132d571701b4a94fcb2ad70838fa2320141c" gracePeriod=2 Dec 10 00:35:56 crc kubenswrapper[4884]: I1210 00:35:56.333635 4884 generic.go:334] "Generic (PLEG): container finished" podID="0e6acb02-4f9e-4771-9213-955aa68cfa43" containerID="e6d3088d82f1063697f86d936085132d571701b4a94fcb2ad70838fa2320141c" exitCode=0 Dec 10 00:35:56 crc kubenswrapper[4884]: I1210 00:35:56.333750 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rdvc9" event={"ID":"0e6acb02-4f9e-4771-9213-955aa68cfa43","Type":"ContainerDied","Data":"e6d3088d82f1063697f86d936085132d571701b4a94fcb2ad70838fa2320141c"} Dec 10 00:35:56 crc kubenswrapper[4884]: I1210 00:35:56.979552 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.066317 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e6acb02-4f9e-4771-9213-955aa68cfa43-catalog-content\") pod \"0e6acb02-4f9e-4771-9213-955aa68cfa43\" (UID: \"0e6acb02-4f9e-4771-9213-955aa68cfa43\") " Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.066629 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e6acb02-4f9e-4771-9213-955aa68cfa43-utilities\") pod \"0e6acb02-4f9e-4771-9213-955aa68cfa43\" (UID: \"0e6acb02-4f9e-4771-9213-955aa68cfa43\") " Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.067736 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e6acb02-4f9e-4771-9213-955aa68cfa43-utilities" (OuterVolumeSpecName: "utilities") pod "0e6acb02-4f9e-4771-9213-955aa68cfa43" (UID: "0e6acb02-4f9e-4771-9213-955aa68cfa43"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.168736 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qq8wq\" (UniqueName: \"kubernetes.io/projected/0e6acb02-4f9e-4771-9213-955aa68cfa43-kube-api-access-qq8wq\") pod \"0e6acb02-4f9e-4771-9213-955aa68cfa43\" (UID: \"0e6acb02-4f9e-4771-9213-955aa68cfa43\") " Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.169128 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0e6acb02-4f9e-4771-9213-955aa68cfa43-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.175743 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e6acb02-4f9e-4771-9213-955aa68cfa43-kube-api-access-qq8wq" (OuterVolumeSpecName: "kube-api-access-qq8wq") pod "0e6acb02-4f9e-4771-9213-955aa68cfa43" (UID: "0e6acb02-4f9e-4771-9213-955aa68cfa43"). InnerVolumeSpecName "kube-api-access-qq8wq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.210989 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e6acb02-4f9e-4771-9213-955aa68cfa43-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0e6acb02-4f9e-4771-9213-955aa68cfa43" (UID: "0e6acb02-4f9e-4771-9213-955aa68cfa43"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.270326 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0e6acb02-4f9e-4771-9213-955aa68cfa43-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.270647 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qq8wq\" (UniqueName: \"kubernetes.io/projected/0e6acb02-4f9e-4771-9213-955aa68cfa43-kube-api-access-qq8wq\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.348229 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rdvc9" event={"ID":"0e6acb02-4f9e-4771-9213-955aa68cfa43","Type":"ContainerDied","Data":"93ad622b64295ee43ac8e3b8db3804784d1741aca5822fd5ba97c7036ff7cdc6"} Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.348349 4884 scope.go:117] "RemoveContainer" containerID="e6d3088d82f1063697f86d936085132d571701b4a94fcb2ad70838fa2320141c" Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.348784 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rdvc9" Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.384180 4884 scope.go:117] "RemoveContainer" containerID="b10d107031a5c5a4b8cddcd77de7e0a5f69e049fe20fbebfc7ff542f1aaea25e" Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.392928 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rdvc9"] Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.400615 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rdvc9"] Dec 10 00:35:57 crc kubenswrapper[4884]: I1210 00:35:57.418782 4884 scope.go:117] "RemoveContainer" containerID="eb96f84f23ab7ca6ebc7bda4f021a7df6f939733f5cee38b83f2e1028689d985" Dec 10 00:35:58 crc kubenswrapper[4884]: I1210 00:35:58.882867 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6488c7567-wqrht"] Dec 10 00:35:58 crc kubenswrapper[4884]: I1210 00:35:58.883098 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" podUID="a10d8a72-81da-4077-b18f-ba448b4ddbf5" containerName="controller-manager" containerID="cri-o://559e9b0aeab72cd71d4c2bdf9f96fb31fb35905f91fc95c280a8de38555808c8" gracePeriod=30 Dec 10 00:35:58 crc kubenswrapper[4884]: I1210 00:35:58.909598 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc"] Dec 10 00:35:58 crc kubenswrapper[4884]: I1210 00:35:58.910338 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" podUID="ca1d2c87-d80a-4f09-a165-230f87d54641" containerName="route-controller-manager" containerID="cri-o://5635e950e439ba213a5f5648488fbcdd3255cd7460861fc5d97399d71149322d" gracePeriod=30 Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.295856 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e6acb02-4f9e-4771-9213-955aa68cfa43" path="/var/lib/kubelet/pods/0e6acb02-4f9e-4771-9213-955aa68cfa43/volumes" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.364101 4884 generic.go:334] "Generic (PLEG): container 
finished" podID="ca1d2c87-d80a-4f09-a165-230f87d54641" containerID="5635e950e439ba213a5f5648488fbcdd3255cd7460861fc5d97399d71149322d" exitCode=0 Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.364171 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" event={"ID":"ca1d2c87-d80a-4f09-a165-230f87d54641","Type":"ContainerDied","Data":"5635e950e439ba213a5f5648488fbcdd3255cd7460861fc5d97399d71149322d"} Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.365561 4884 generic.go:334] "Generic (PLEG): container finished" podID="a10d8a72-81da-4077-b18f-ba448b4ddbf5" containerID="559e9b0aeab72cd71d4c2bdf9f96fb31fb35905f91fc95c280a8de38555808c8" exitCode=0 Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.365616 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" event={"ID":"a10d8a72-81da-4077-b18f-ba448b4ddbf5","Type":"ContainerDied","Data":"559e9b0aeab72cd71d4c2bdf9f96fb31fb35905f91fc95c280a8de38555808c8"} Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.426923 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.546664 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.620638 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2v8x\" (UniqueName: \"kubernetes.io/projected/ca1d2c87-d80a-4f09-a165-230f87d54641-kube-api-access-h2v8x\") pod \"ca1d2c87-d80a-4f09-a165-230f87d54641\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.620718 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca1d2c87-d80a-4f09-a165-230f87d54641-serving-cert\") pod \"ca1d2c87-d80a-4f09-a165-230f87d54641\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.620803 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ca1d2c87-d80a-4f09-a165-230f87d54641-client-ca\") pod \"ca1d2c87-d80a-4f09-a165-230f87d54641\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.620878 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca1d2c87-d80a-4f09-a165-230f87d54641-config\") pod \"ca1d2c87-d80a-4f09-a165-230f87d54641\" (UID: \"ca1d2c87-d80a-4f09-a165-230f87d54641\") " Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.622056 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca1d2c87-d80a-4f09-a165-230f87d54641-config" (OuterVolumeSpecName: "config") pod "ca1d2c87-d80a-4f09-a165-230f87d54641" (UID: "ca1d2c87-d80a-4f09-a165-230f87d54641"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.623336 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca1d2c87-d80a-4f09-a165-230f87d54641-client-ca" (OuterVolumeSpecName: "client-ca") pod "ca1d2c87-d80a-4f09-a165-230f87d54641" (UID: "ca1d2c87-d80a-4f09-a165-230f87d54641"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.630664 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca1d2c87-d80a-4f09-a165-230f87d54641-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ca1d2c87-d80a-4f09-a165-230f87d54641" (UID: "ca1d2c87-d80a-4f09-a165-230f87d54641"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.632526 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca1d2c87-d80a-4f09-a165-230f87d54641-kube-api-access-h2v8x" (OuterVolumeSpecName: "kube-api-access-h2v8x") pod "ca1d2c87-d80a-4f09-a165-230f87d54641" (UID: "ca1d2c87-d80a-4f09-a165-230f87d54641"). InnerVolumeSpecName "kube-api-access-h2v8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.721834 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-client-ca\") pod \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.721976 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a10d8a72-81da-4077-b18f-ba448b4ddbf5-serving-cert\") pod \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.722018 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7l8f\" (UniqueName: \"kubernetes.io/projected/a10d8a72-81da-4077-b18f-ba448b4ddbf5-kube-api-access-v7l8f\") pod \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.722046 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-config\") pod \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.722082 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-proxy-ca-bundles\") pod \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\" (UID: \"a10d8a72-81da-4077-b18f-ba448b4ddbf5\") " Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.722322 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2v8x\" (UniqueName: \"kubernetes.io/projected/ca1d2c87-d80a-4f09-a165-230f87d54641-kube-api-access-h2v8x\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.722337 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/ca1d2c87-d80a-4f09-a165-230f87d54641-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.722348 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ca1d2c87-d80a-4f09-a165-230f87d54641-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.722358 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca1d2c87-d80a-4f09-a165-230f87d54641-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.722903 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "a10d8a72-81da-4077-b18f-ba448b4ddbf5" (UID: "a10d8a72-81da-4077-b18f-ba448b4ddbf5"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.723041 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-client-ca" (OuterVolumeSpecName: "client-ca") pod "a10d8a72-81da-4077-b18f-ba448b4ddbf5" (UID: "a10d8a72-81da-4077-b18f-ba448b4ddbf5"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.723081 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-config" (OuterVolumeSpecName: "config") pod "a10d8a72-81da-4077-b18f-ba448b4ddbf5" (UID: "a10d8a72-81da-4077-b18f-ba448b4ddbf5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.725032 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a10d8a72-81da-4077-b18f-ba448b4ddbf5-kube-api-access-v7l8f" (OuterVolumeSpecName: "kube-api-access-v7l8f") pod "a10d8a72-81da-4077-b18f-ba448b4ddbf5" (UID: "a10d8a72-81da-4077-b18f-ba448b4ddbf5"). InnerVolumeSpecName "kube-api-access-v7l8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.725925 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a10d8a72-81da-4077-b18f-ba448b4ddbf5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a10d8a72-81da-4077-b18f-ba448b4ddbf5" (UID: "a10d8a72-81da-4077-b18f-ba448b4ddbf5"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.824127 4884 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-client-ca\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.824163 4884 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a10d8a72-81da-4077-b18f-ba448b4ddbf5-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.824175 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7l8f\" (UniqueName: \"kubernetes.io/projected/a10d8a72-81da-4077-b18f-ba448b4ddbf5-kube-api-access-v7l8f\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.824188 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:35:59 crc kubenswrapper[4884]: I1210 00:35:59.824198 4884 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a10d8a72-81da-4077-b18f-ba448b4ddbf5-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.373404 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" event={"ID":"ca1d2c87-d80a-4f09-a165-230f87d54641","Type":"ContainerDied","Data":"81edf91b430224f4f5c5e124848a2a217a3134646336a4e2c40bc489c3de7c62"} Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.373488 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.373845 4884 scope.go:117] "RemoveContainer" containerID="5635e950e439ba213a5f5648488fbcdd3255cd7460861fc5d97399d71149322d" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.375220 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" event={"ID":"a10d8a72-81da-4077-b18f-ba448b4ddbf5","Type":"ContainerDied","Data":"c5e970e4ac08fed03cf8df0052b8a0a3015f84ecacae3fef19aa5a829ca500aa"} Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.375270 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6488c7567-wqrht" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.395968 4884 scope.go:117] "RemoveContainer" containerID="559e9b0aeab72cd71d4c2bdf9f96fb31fb35905f91fc95c280a8de38555808c8" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.416240 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc"] Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.435526 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-d5775f87f-8d7mc"] Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.445503 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6488c7567-wqrht"] Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.454078 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6488c7567-wqrht"] Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.774150 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5b5446fddb-mv567"] Dec 10 00:36:00 crc kubenswrapper[4884]: E1210 00:36:00.775726 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e6acb02-4f9e-4771-9213-955aa68cfa43" containerName="registry-server" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.775775 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e6acb02-4f9e-4771-9213-955aa68cfa43" containerName="registry-server" Dec 10 00:36:00 crc kubenswrapper[4884]: E1210 00:36:00.775806 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca1d2c87-d80a-4f09-a165-230f87d54641" containerName="route-controller-manager" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.775821 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca1d2c87-d80a-4f09-a165-230f87d54641" containerName="route-controller-manager" Dec 10 00:36:00 crc kubenswrapper[4884]: E1210 00:36:00.775889 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e6acb02-4f9e-4771-9213-955aa68cfa43" containerName="extract-content" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.775937 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e6acb02-4f9e-4771-9213-955aa68cfa43" containerName="extract-content" Dec 10 00:36:00 crc kubenswrapper[4884]: E1210 00:36:00.775972 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e6acb02-4f9e-4771-9213-955aa68cfa43" containerName="extract-utilities" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.775986 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e6acb02-4f9e-4771-9213-955aa68cfa43" containerName="extract-utilities" Dec 10 00:36:00 crc kubenswrapper[4884]: E1210 00:36:00.776002 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a10d8a72-81da-4077-b18f-ba448b4ddbf5" containerName="controller-manager" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.776014 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a10d8a72-81da-4077-b18f-ba448b4ddbf5" containerName="controller-manager" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.776502 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca1d2c87-d80a-4f09-a165-230f87d54641" containerName="route-controller-manager" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.776529 4884 
memory_manager.go:354] "RemoveStaleState removing state" podUID="0e6acb02-4f9e-4771-9213-955aa68cfa43" containerName="registry-server" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.776547 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a10d8a72-81da-4077-b18f-ba448b4ddbf5" containerName="controller-manager" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.777458 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.781947 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.782331 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.783830 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.785323 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.785628 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.787109 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.796094 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v"] Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.798633 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.804611 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.807942 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.807986 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.808026 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.810105 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.810224 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.812987 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.828072 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b5446fddb-mv567"] Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.850648 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v"] Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.941120 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mz7fw\" (UniqueName: \"kubernetes.io/projected/cde2cc31-ec02-4a2a-9e22-fca145fd4b95-kube-api-access-mz7fw\") pod \"route-controller-manager-74dcd8b68b-d6c5v\" (UID: \"cde2cc31-ec02-4a2a-9e22-fca145fd4b95\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.941256 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cde2cc31-ec02-4a2a-9e22-fca145fd4b95-client-ca\") pod \"route-controller-manager-74dcd8b68b-d6c5v\" (UID: \"cde2cc31-ec02-4a2a-9e22-fca145fd4b95\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.941301 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkfj2\" (UniqueName: \"kubernetes.io/projected/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-kube-api-access-pkfj2\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.941332 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cde2cc31-ec02-4a2a-9e22-fca145fd4b95-config\") pod \"route-controller-manager-74dcd8b68b-d6c5v\" (UID: 
\"cde2cc31-ec02-4a2a-9e22-fca145fd4b95\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.941360 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-serving-cert\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.941525 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-client-ca\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.941662 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cde2cc31-ec02-4a2a-9e22-fca145fd4b95-serving-cert\") pod \"route-controller-manager-74dcd8b68b-d6c5v\" (UID: \"cde2cc31-ec02-4a2a-9e22-fca145fd4b95\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.941735 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-config\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:00 crc kubenswrapper[4884]: I1210 00:36:00.941820 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-proxy-ca-bundles\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.042699 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cde2cc31-ec02-4a2a-9e22-fca145fd4b95-client-ca\") pod \"route-controller-manager-74dcd8b68b-d6c5v\" (UID: \"cde2cc31-ec02-4a2a-9e22-fca145fd4b95\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.042778 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkfj2\" (UniqueName: \"kubernetes.io/projected/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-kube-api-access-pkfj2\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.042819 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-serving-cert\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " 
pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.042849 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cde2cc31-ec02-4a2a-9e22-fca145fd4b95-config\") pod \"route-controller-manager-74dcd8b68b-d6c5v\" (UID: \"cde2cc31-ec02-4a2a-9e22-fca145fd4b95\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.042880 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-client-ca\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.042934 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cde2cc31-ec02-4a2a-9e22-fca145fd4b95-serving-cert\") pod \"route-controller-manager-74dcd8b68b-d6c5v\" (UID: \"cde2cc31-ec02-4a2a-9e22-fca145fd4b95\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.042983 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-config\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.043025 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-proxy-ca-bundles\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.043080 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mz7fw\" (UniqueName: \"kubernetes.io/projected/cde2cc31-ec02-4a2a-9e22-fca145fd4b95-kube-api-access-mz7fw\") pod \"route-controller-manager-74dcd8b68b-d6c5v\" (UID: \"cde2cc31-ec02-4a2a-9e22-fca145fd4b95\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.045317 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cde2cc31-ec02-4a2a-9e22-fca145fd4b95-client-ca\") pod \"route-controller-manager-74dcd8b68b-d6c5v\" (UID: \"cde2cc31-ec02-4a2a-9e22-fca145fd4b95\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.053172 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-serving-cert\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.054940 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cde2cc31-ec02-4a2a-9e22-fca145fd4b95-config\") pod \"route-controller-manager-74dcd8b68b-d6c5v\" (UID: \"cde2cc31-ec02-4a2a-9e22-fca145fd4b95\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.057109 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-config\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.057604 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-proxy-ca-bundles\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.058771 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-client-ca\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.062622 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cde2cc31-ec02-4a2a-9e22-fca145fd4b95-serving-cert\") pod \"route-controller-manager-74dcd8b68b-d6c5v\" (UID: \"cde2cc31-ec02-4a2a-9e22-fca145fd4b95\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.066318 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mz7fw\" (UniqueName: \"kubernetes.io/projected/cde2cc31-ec02-4a2a-9e22-fca145fd4b95-kube-api-access-mz7fw\") pod \"route-controller-manager-74dcd8b68b-d6c5v\" (UID: \"cde2cc31-ec02-4a2a-9e22-fca145fd4b95\") " pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.076583 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkfj2\" (UniqueName: \"kubernetes.io/projected/dc5d5af1-c37a-4e60-9c2e-00d9251b11dc-kube-api-access-pkfj2\") pod \"controller-manager-5b5446fddb-mv567\" (UID: \"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc\") " pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.125892 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.143612 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.299641 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a10d8a72-81da-4077-b18f-ba448b4ddbf5" path="/var/lib/kubelet/pods/a10d8a72-81da-4077-b18f-ba448b4ddbf5/volumes" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.301635 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca1d2c87-d80a-4f09-a165-230f87d54641" path="/var/lib/kubelet/pods/ca1d2c87-d80a-4f09-a165-230f87d54641/volumes" Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.393909 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5b5446fddb-mv567"] Dec 10 00:36:01 crc kubenswrapper[4884]: I1210 00:36:01.431004 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v"] Dec 10 00:36:01 crc kubenswrapper[4884]: W1210 00:36:01.443235 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcde2cc31_ec02_4a2a_9e22_fca145fd4b95.slice/crio-956830010b1a0b2c0fa7e420fc11e24265fbff8a99db9375fc23633afb6026f9 WatchSource:0}: Error finding container 956830010b1a0b2c0fa7e420fc11e24265fbff8a99db9375fc23633afb6026f9: Status 404 returned error can't find the container with id 956830010b1a0b2c0fa7e420fc11e24265fbff8a99db9375fc23633afb6026f9 Dec 10 00:36:02 crc kubenswrapper[4884]: I1210 00:36:02.402727 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" event={"ID":"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc","Type":"ContainerStarted","Data":"e42eee441763020b662b5d5bc6a4d99d94df8c8eed4c4ecdb49b719d54f6fabc"} Dec 10 00:36:02 crc kubenswrapper[4884]: I1210 00:36:02.403108 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" event={"ID":"dc5d5af1-c37a-4e60-9c2e-00d9251b11dc","Type":"ContainerStarted","Data":"e30a59dcf6fe3657125832755d5e61455ef6b03b37c6eae38f2338dc89462f15"} Dec 10 00:36:02 crc kubenswrapper[4884]: I1210 00:36:02.403330 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" Dec 10 00:36:02 crc kubenswrapper[4884]: I1210 00:36:02.404917 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" event={"ID":"cde2cc31-ec02-4a2a-9e22-fca145fd4b95","Type":"ContainerStarted","Data":"0d8d942aca5d5e5d25da8c4a215e6b0a0b698ab70e52d38ecb6caa59dff0e970"} Dec 10 00:36:02 crc kubenswrapper[4884]: I1210 00:36:02.404960 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" event={"ID":"cde2cc31-ec02-4a2a-9e22-fca145fd4b95","Type":"ContainerStarted","Data":"956830010b1a0b2c0fa7e420fc11e24265fbff8a99db9375fc23633afb6026f9"} Dec 10 00:36:02 crc kubenswrapper[4884]: I1210 00:36:02.405347 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" Dec 10 00:36:02 crc kubenswrapper[4884]: I1210 00:36:02.409467 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" 
Dec 10 00:36:02 crc kubenswrapper[4884]: I1210 00:36:02.410566 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v"
Dec 10 00:36:02 crc kubenswrapper[4884]: I1210 00:36:02.423755 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5b5446fddb-mv567" podStartSLOduration=4.4237266250000005 podStartE2EDuration="4.423726625s" podCreationTimestamp="2025-12-10 00:35:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:36:02.423406406 +0000 UTC m=+335.501363533" watchObservedRunningTime="2025-12-10 00:36:02.423726625 +0000 UTC m=+335.501683752"
Dec 10 00:36:02 crc kubenswrapper[4884]: I1210 00:36:02.448488 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-74dcd8b68b-d6c5v" podStartSLOduration=4.448418955 podStartE2EDuration="4.448418955s" podCreationTimestamp="2025-12-10 00:35:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:36:02.447399926 +0000 UTC m=+335.525357103" watchObservedRunningTime="2025-12-10 00:36:02.448418955 +0000 UTC m=+335.526376082"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.346398 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tpkdc"]
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.347745 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.360913 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tpkdc"]
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.544833 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9460e66c-707a-4622-9d5b-615231e211bc-trusted-ca\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.544988 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.545620 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9460e66c-707a-4622-9d5b-615231e211bc-bound-sa-token\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.545673 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9460e66c-707a-4622-9d5b-615231e211bc-registry-certificates\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.545725 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g8h2\" (UniqueName: \"kubernetes.io/projected/9460e66c-707a-4622-9d5b-615231e211bc-kube-api-access-4g8h2\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.545824 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9460e66c-707a-4622-9d5b-615231e211bc-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.545955 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9460e66c-707a-4622-9d5b-615231e211bc-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.546033 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9460e66c-707a-4622-9d5b-615231e211bc-registry-tls\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.584474 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.647843 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9460e66c-707a-4622-9d5b-615231e211bc-bound-sa-token\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.648644 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9460e66c-707a-4622-9d5b-615231e211bc-registry-certificates\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.650851 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9460e66c-707a-4622-9d5b-615231e211bc-registry-certificates\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.650980 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g8h2\" (UniqueName: \"kubernetes.io/projected/9460e66c-707a-4622-9d5b-615231e211bc-kube-api-access-4g8h2\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.652736 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9460e66c-707a-4622-9d5b-615231e211bc-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.654293 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9460e66c-707a-4622-9d5b-615231e211bc-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.654411 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9460e66c-707a-4622-9d5b-615231e211bc-registry-tls\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.654687 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9460e66c-707a-4622-9d5b-615231e211bc-trusted-ca\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.655609 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9460e66c-707a-4622-9d5b-615231e211bc-ca-trust-extracted\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.657750 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9460e66c-707a-4622-9d5b-615231e211bc-trusted-ca\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.664322 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9460e66c-707a-4622-9d5b-615231e211bc-installation-pull-secrets\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.670522 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9460e66c-707a-4622-9d5b-615231e211bc-registry-tls\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.671216 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9460e66c-707a-4622-9d5b-615231e211bc-bound-sa-token\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.673093 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g8h2\" (UniqueName: \"kubernetes.io/projected/9460e66c-707a-4622-9d5b-615231e211bc-kube-api-access-4g8h2\") pod \"image-registry-66df7c8f76-tpkdc\" (UID: \"9460e66c-707a-4622-9d5b-615231e211bc\") " pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:40 crc kubenswrapper[4884]: I1210 00:36:40.968074 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:41 crc kubenswrapper[4884]: I1210 00:36:41.401023 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-tpkdc"]
Dec 10 00:36:41 crc kubenswrapper[4884]: I1210 00:36:41.751118 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc" event={"ID":"9460e66c-707a-4622-9d5b-615231e211bc","Type":"ContainerStarted","Data":"c1f0b5558ef094dbc8ff4e107a3b557d91dc0e30a06596217dd9f99da7aa606b"}
Dec 10 00:36:41 crc kubenswrapper[4884]: I1210 00:36:41.751527 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc"
Dec 10 00:36:41 crc kubenswrapper[4884]: I1210 00:36:41.751544 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc" event={"ID":"9460e66c-707a-4622-9d5b-615231e211bc","Type":"ContainerStarted","Data":"22a8fef7ce57d21a3252867f774dcc39ec1d719b28284203e9c25b32294e3026"}
Dec 10 00:36:41 crc kubenswrapper[4884]: I1210 00:36:41.783368 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc" podStartSLOduration=1.783346995 podStartE2EDuration="1.783346995s" podCreationTimestamp="2025-12-10 00:36:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:36:41.780356113 +0000 UTC m=+374.858313260" watchObservedRunningTime="2025-12-10 00:36:41.783346995 +0000 UTC m=+374.861304122"
Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.490711 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sxshd"]
Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.492246 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sxshd" podUID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" containerName="registry-server" containerID="cri-o://1a0ccb23c70ddfe5d57cee31f84a3ab3629c024c2f2689b434d21a89afc99fc2" gracePeriod=30
Dec 10 00:36:44 crc kubenswrapper[4884]: I1210
00:36:44.497419 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f7d5j"] Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.497762 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-f7d5j" podUID="93607314-0a98-4da3-bfc3-5514b65e6580" containerName="registry-server" containerID="cri-o://a4f815d25e22b0417f9a6349e8e3c7e5625772e6d0de27c5f7d4a04533715488" gracePeriod=30 Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.504016 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cncdj"] Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.504378 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" podUID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" containerName="marketplace-operator" containerID="cri-o://d9028f908c919244a2fdaa126a47128c6d84f8430265221b40d121d5c62d61e9" gracePeriod=30 Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.507107 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6pgr"] Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.507414 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-h6pgr" podUID="12a04f77-054c-4a5d-80c5-2d64fcc137af" containerName="registry-server" containerID="cri-o://e441723387990beb0b7021b5580eb6713a43f7ca897a7199da883d6b4adfa4c5" gracePeriod=30 Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.510554 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-dccsc"] Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.511422 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.515291 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mnp4t"] Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.515582 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mnp4t" podUID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" containerName="registry-server" containerID="cri-o://ea165dc2a148b7c60377b990dd6439989607b283adce092d0d772c6df36b5fa3" gracePeriod=30 Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.519742 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-dccsc"] Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.653095 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2m7z\" (UniqueName: \"kubernetes.io/projected/aacf65ac-3787-462f-ad27-fa5663a86d99-kube-api-access-j2m7z\") pod \"marketplace-operator-79b997595-dccsc\" (UID: \"aacf65ac-3787-462f-ad27-fa5663a86d99\") " pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.653158 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/aacf65ac-3787-462f-ad27-fa5663a86d99-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-dccsc\" (UID: \"aacf65ac-3787-462f-ad27-fa5663a86d99\") " pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.653286 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aacf65ac-3787-462f-ad27-fa5663a86d99-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-dccsc\" (UID: \"aacf65ac-3787-462f-ad27-fa5663a86d99\") " pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.755017 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aacf65ac-3787-462f-ad27-fa5663a86d99-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-dccsc\" (UID: \"aacf65ac-3787-462f-ad27-fa5663a86d99\") " pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.755106 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2m7z\" (UniqueName: \"kubernetes.io/projected/aacf65ac-3787-462f-ad27-fa5663a86d99-kube-api-access-j2m7z\") pod \"marketplace-operator-79b997595-dccsc\" (UID: \"aacf65ac-3787-462f-ad27-fa5663a86d99\") " pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.755141 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/aacf65ac-3787-462f-ad27-fa5663a86d99-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-dccsc\" (UID: \"aacf65ac-3787-462f-ad27-fa5663a86d99\") " pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.757383 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aacf65ac-3787-462f-ad27-fa5663a86d99-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-dccsc\" (UID: \"aacf65ac-3787-462f-ad27-fa5663a86d99\") " pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.769948 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/aacf65ac-3787-462f-ad27-fa5663a86d99-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-dccsc\" (UID: \"aacf65ac-3787-462f-ad27-fa5663a86d99\") " pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.787212 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2m7z\" (UniqueName: \"kubernetes.io/projected/aacf65ac-3787-462f-ad27-fa5663a86d99-kube-api-access-j2m7z\") pod \"marketplace-operator-79b997595-dccsc\" (UID: \"aacf65ac-3787-462f-ad27-fa5663a86d99\") " pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.803256 4884 generic.go:334] "Generic (PLEG): container finished" podID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" containerID="1a0ccb23c70ddfe5d57cee31f84a3ab3629c024c2f2689b434d21a89afc99fc2" exitCode=0 Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.803355 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sxshd" event={"ID":"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd","Type":"ContainerDied","Data":"1a0ccb23c70ddfe5d57cee31f84a3ab3629c024c2f2689b434d21a89afc99fc2"} Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.808115 4884 generic.go:334] "Generic (PLEG): container finished" podID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" containerID="d9028f908c919244a2fdaa126a47128c6d84f8430265221b40d121d5c62d61e9" exitCode=0 Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.808162 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" event={"ID":"db8164d6-52ea-4ee2-b307-0acc3cbd72a9","Type":"ContainerDied","Data":"d9028f908c919244a2fdaa126a47128c6d84f8430265221b40d121d5c62d61e9"} Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.808189 4884 scope.go:117] "RemoveContainer" containerID="942d31a2ebc9e789159947fa3eccd4cd52e94c6531274973588c05e45ee61931" Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.812316 4884 generic.go:334] "Generic (PLEG): container finished" podID="12a04f77-054c-4a5d-80c5-2d64fcc137af" containerID="e441723387990beb0b7021b5580eb6713a43f7ca897a7199da883d6b4adfa4c5" exitCode=0 Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.812353 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6pgr" event={"ID":"12a04f77-054c-4a5d-80c5-2d64fcc137af","Type":"ContainerDied","Data":"e441723387990beb0b7021b5580eb6713a43f7ca897a7199da883d6b4adfa4c5"} Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.821811 4884 generic.go:334] "Generic (PLEG): container finished" podID="93607314-0a98-4da3-bfc3-5514b65e6580" containerID="a4f815d25e22b0417f9a6349e8e3c7e5625772e6d0de27c5f7d4a04533715488" exitCode=0 Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.821914 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-f7d5j" event={"ID":"93607314-0a98-4da3-bfc3-5514b65e6580","Type":"ContainerDied","Data":"a4f815d25e22b0417f9a6349e8e3c7e5625772e6d0de27c5f7d4a04533715488"} Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.826686 4884 generic.go:334] "Generic (PLEG): container finished" podID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" containerID="ea165dc2a148b7c60377b990dd6439989607b283adce092d0d772c6df36b5fa3" exitCode=0 Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.826745 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnp4t" event={"ID":"8eba9bb1-b07d-4d37-99f5-ddd952f9f681","Type":"ContainerDied","Data":"ea165dc2a148b7c60377b990dd6439989607b283adce092d0d772c6df36b5fa3"} Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.836036 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:44 crc kubenswrapper[4884]: I1210 00:36:44.933881 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.059010 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93607314-0a98-4da3-bfc3-5514b65e6580-catalog-content\") pod \"93607314-0a98-4da3-bfc3-5514b65e6580\" (UID: \"93607314-0a98-4da3-bfc3-5514b65e6580\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.059147 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzcvw\" (UniqueName: \"kubernetes.io/projected/93607314-0a98-4da3-bfc3-5514b65e6580-kube-api-access-lzcvw\") pod \"93607314-0a98-4da3-bfc3-5514b65e6580\" (UID: \"93607314-0a98-4da3-bfc3-5514b65e6580\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.059211 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93607314-0a98-4da3-bfc3-5514b65e6580-utilities\") pod \"93607314-0a98-4da3-bfc3-5514b65e6580\" (UID: \"93607314-0a98-4da3-bfc3-5514b65e6580\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.060611 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93607314-0a98-4da3-bfc3-5514b65e6580-utilities" (OuterVolumeSpecName: "utilities") pod "93607314-0a98-4da3-bfc3-5514b65e6580" (UID: "93607314-0a98-4da3-bfc3-5514b65e6580"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.090049 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93607314-0a98-4da3-bfc3-5514b65e6580-kube-api-access-lzcvw" (OuterVolumeSpecName: "kube-api-access-lzcvw") pod "93607314-0a98-4da3-bfc3-5514b65e6580" (UID: "93607314-0a98-4da3-bfc3-5514b65e6580"). InnerVolumeSpecName "kube-api-access-lzcvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.097321 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-dccsc"] Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.098086 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.111389 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.123741 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.130487 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.133907 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93607314-0a98-4da3-bfc3-5514b65e6580-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93607314-0a98-4da3-bfc3-5514b65e6580" (UID: "93607314-0a98-4da3-bfc3-5514b65e6580"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.160812 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-utilities\") pod \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\" (UID: \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.160900 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-catalog-content\") pod \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\" (UID: \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.160960 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vpkzz\" (UniqueName: \"kubernetes.io/projected/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-kube-api-access-vpkzz\") pod \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\" (UID: \"8eba9bb1-b07d-4d37-99f5-ddd952f9f681\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.161268 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzcvw\" (UniqueName: \"kubernetes.io/projected/93607314-0a98-4da3-bfc3-5514b65e6580-kube-api-access-lzcvw\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.161288 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93607314-0a98-4da3-bfc3-5514b65e6580-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.161299 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93607314-0a98-4da3-bfc3-5514b65e6580-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.162650 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-utilities" (OuterVolumeSpecName: "utilities") pod "8eba9bb1-b07d-4d37-99f5-ddd952f9f681" (UID: "8eba9bb1-b07d-4d37-99f5-ddd952f9f681"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.168100 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-kube-api-access-vpkzz" (OuterVolumeSpecName: "kube-api-access-vpkzz") pod "8eba9bb1-b07d-4d37-99f5-ddd952f9f681" (UID: "8eba9bb1-b07d-4d37-99f5-ddd952f9f681"). InnerVolumeSpecName "kube-api-access-vpkzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.261721 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bl2g\" (UniqueName: \"kubernetes.io/projected/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-kube-api-access-6bl2g\") pod \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\" (UID: \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.261765 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfklc\" (UniqueName: \"kubernetes.io/projected/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-kube-api-access-tfklc\") pod \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\" (UID: \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.261828 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-marketplace-trusted-ca\") pod \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\" (UID: \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.261858 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqngc\" (UniqueName: \"kubernetes.io/projected/12a04f77-054c-4a5d-80c5-2d64fcc137af-kube-api-access-jqngc\") pod \"12a04f77-054c-4a5d-80c5-2d64fcc137af\" (UID: \"12a04f77-054c-4a5d-80c5-2d64fcc137af\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.261890 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12a04f77-054c-4a5d-80c5-2d64fcc137af-catalog-content\") pod \"12a04f77-054c-4a5d-80c5-2d64fcc137af\" (UID: \"12a04f77-054c-4a5d-80c5-2d64fcc137af\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.261983 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12a04f77-054c-4a5d-80c5-2d64fcc137af-utilities\") pod \"12a04f77-054c-4a5d-80c5-2d64fcc137af\" (UID: \"12a04f77-054c-4a5d-80c5-2d64fcc137af\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.262009 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-utilities\") pod \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\" (UID: \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.262064 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-catalog-content\") pod \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\" (UID: \"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.262095 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-marketplace-operator-metrics\") pod \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\" (UID: \"db8164d6-52ea-4ee2-b307-0acc3cbd72a9\") " Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.262390 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.262413 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vpkzz\" (UniqueName: \"kubernetes.io/projected/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-kube-api-access-vpkzz\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.262882 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12a04f77-054c-4a5d-80c5-2d64fcc137af-utilities" (OuterVolumeSpecName: "utilities") pod "12a04f77-054c-4a5d-80c5-2d64fcc137af" (UID: "12a04f77-054c-4a5d-80c5-2d64fcc137af"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.262986 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "db8164d6-52ea-4ee2-b307-0acc3cbd72a9" (UID: "db8164d6-52ea-4ee2-b307-0acc3cbd72a9"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.263316 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-utilities" (OuterVolumeSpecName: "utilities") pod "89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" (UID: "89270eb2-fb6c-41ec-bed8-ff42dc28c6fd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.266030 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12a04f77-054c-4a5d-80c5-2d64fcc137af-kube-api-access-jqngc" (OuterVolumeSpecName: "kube-api-access-jqngc") pod "12a04f77-054c-4a5d-80c5-2d64fcc137af" (UID: "12a04f77-054c-4a5d-80c5-2d64fcc137af"). InnerVolumeSpecName "kube-api-access-jqngc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.266885 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "db8164d6-52ea-4ee2-b307-0acc3cbd72a9" (UID: "db8164d6-52ea-4ee2-b307-0acc3cbd72a9"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.284075 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-kube-api-access-tfklc" (OuterVolumeSpecName: "kube-api-access-tfklc") pod "db8164d6-52ea-4ee2-b307-0acc3cbd72a9" (UID: "db8164d6-52ea-4ee2-b307-0acc3cbd72a9"). InnerVolumeSpecName "kube-api-access-tfklc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.287134 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-kube-api-access-6bl2g" (OuterVolumeSpecName: "kube-api-access-6bl2g") pod "89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" (UID: "89270eb2-fb6c-41ec-bed8-ff42dc28c6fd"). InnerVolumeSpecName "kube-api-access-6bl2g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.301103 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12a04f77-054c-4a5d-80c5-2d64fcc137af-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12a04f77-054c-4a5d-80c5-2d64fcc137af" (UID: "12a04f77-054c-4a5d-80c5-2d64fcc137af"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.342943 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8eba9bb1-b07d-4d37-99f5-ddd952f9f681" (UID: "8eba9bb1-b07d-4d37-99f5-ddd952f9f681"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.347175 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" (UID: "89270eb2-fb6c-41ec-bed8-ff42dc28c6fd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.364418 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12a04f77-054c-4a5d-80c5-2d64fcc137af-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.364493 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.364505 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.364580 4884 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.364593 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bl2g\" (UniqueName: \"kubernetes.io/projected/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd-kube-api-access-6bl2g\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.364603 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfklc\" (UniqueName: \"kubernetes.io/projected/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-kube-api-access-tfklc\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.364613 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eba9bb1-b07d-4d37-99f5-ddd952f9f681-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.364628 4884 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/db8164d6-52ea-4ee2-b307-0acc3cbd72a9-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.364662 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqngc\" (UniqueName: \"kubernetes.io/projected/12a04f77-054c-4a5d-80c5-2d64fcc137af-kube-api-access-jqngc\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.364672 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12a04f77-054c-4a5d-80c5-2d64fcc137af-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.838192 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-f7d5j" event={"ID":"93607314-0a98-4da3-bfc3-5514b65e6580","Type":"ContainerDied","Data":"4d39ad2ee9382d1b5a3aca5460d229a105dbf99505275770381bbc8e51c992b2"} Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.838263 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-f7d5j" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.838377 4884 scope.go:117] "RemoveContainer" containerID="a4f815d25e22b0417f9a6349e8e3c7e5625772e6d0de27c5f7d4a04533715488" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.843107 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mnp4t" event={"ID":"8eba9bb1-b07d-4d37-99f5-ddd952f9f681","Type":"ContainerDied","Data":"8ee4d6a81b235cf486abc75d7d33a3b105db7c5ee1bcd68327e16f6c8f252245"} Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.843177 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mnp4t" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.848086 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sxshd" event={"ID":"89270eb2-fb6c-41ec-bed8-ff42dc28c6fd","Type":"ContainerDied","Data":"9845a990e4d1a071978e5f247e9e578c07ad7db5a29e576137ff81540a56fe2d"} Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.848478 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sxshd" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.850547 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" event={"ID":"db8164d6-52ea-4ee2-b307-0acc3cbd72a9","Type":"ContainerDied","Data":"d4216dfbf87c579f8e22c90114ce1fea65117beffbb4e9151d2585b7f8e625f3"} Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.850649 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.853790 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h6pgr" event={"ID":"12a04f77-054c-4a5d-80c5-2d64fcc137af","Type":"ContainerDied","Data":"1ffd331c45defaa50e288327f3f6528262b3f4acdfc4a30b42d550942895844b"} Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.853951 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h6pgr" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.856238 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" event={"ID":"aacf65ac-3787-462f-ad27-fa5663a86d99","Type":"ContainerStarted","Data":"e97c78f0347721b46f11ff0df81fa1876ab4ac864c3afecfe4417dc6858cf1c6"} Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.856321 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" event={"ID":"aacf65ac-3787-462f-ad27-fa5663a86d99","Type":"ContainerStarted","Data":"80511d80407cac764f50d167526be791efa2f0494c26fbbdedc5714aa33a8944"} Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.856766 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.871656 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.877379 4884 scope.go:117] "RemoveContainer" containerID="83ddf8adbc3dce0f60845ddb5598a277aa903889b7996d3799b26af471bfa150" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.885185 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-f7d5j"] Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.891079 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-f7d5j"] Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.909354 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6pgr"] Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.909444 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-h6pgr"] Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.940779 4884 scope.go:117] "RemoveContainer" containerID="7c5009e1d415f00004659611cbe66841d70456d125a22e85ccb5881bc02d0b56" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.958051 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-dccsc" podStartSLOduration=1.958015356 podStartE2EDuration="1.958015356s" podCreationTimestamp="2025-12-10 00:36:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:36:45.9400048 +0000 UTC m=+379.017961947" watchObservedRunningTime="2025-12-10 00:36:45.958015356 +0000 UTC m=+379.035972503" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.969985 4884 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-cncdj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.26:8080/healthz\": dial tcp 10.217.0.26:8080: i/o timeout (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.970044 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-cncdj" podUID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.26:8080/healthz\": dial tcp 10.217.0.26:8080: i/o timeout 
(Client.Timeout exceeded while awaiting headers)" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.973561 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mnp4t"] Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.978477 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mnp4t"] Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.980896 4884 scope.go:117] "RemoveContainer" containerID="ea165dc2a148b7c60377b990dd6439989607b283adce092d0d772c6df36b5fa3" Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.981327 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cncdj"] Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.984331 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cncdj"] Dec 10 00:36:45 crc kubenswrapper[4884]: I1210 00:36:45.996091 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sxshd"] Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.001675 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sxshd"] Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.011851 4884 scope.go:117] "RemoveContainer" containerID="68e777288d56b4a49c879d5123cf09286ce9e7da78d79013c209a9aee7b89d1c" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.036267 4884 scope.go:117] "RemoveContainer" containerID="bc7239d47d94b557e47551a10b2d9506c4905beec3271b3750f21dedbb9d605c" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.055045 4884 scope.go:117] "RemoveContainer" containerID="1a0ccb23c70ddfe5d57cee31f84a3ab3629c024c2f2689b434d21a89afc99fc2" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.083183 4884 scope.go:117] "RemoveContainer" containerID="d1961d4d21c795e52b61b681c99e44c56c889e9aa6bf0dcaeb3aeb1158642942" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.109022 4884 scope.go:117] "RemoveContainer" containerID="a288f222a79f9eebde57528a3a9c37623f3d05bdbaf17718efb94883366dade1" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.132531 4884 scope.go:117] "RemoveContainer" containerID="d9028f908c919244a2fdaa126a47128c6d84f8430265221b40d121d5c62d61e9" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.146673 4884 scope.go:117] "RemoveContainer" containerID="e441723387990beb0b7021b5580eb6713a43f7ca897a7199da883d6b4adfa4c5" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.165273 4884 scope.go:117] "RemoveContainer" containerID="9ba2c7bb2984aba4f501b8d611d90492f663401aea53f12ae93841c700efb749" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.183205 4884 scope.go:117] "RemoveContainer" containerID="b0594ec62925f87e7c0d6bc4c1a9d1f7da28a830dc99e8795bfe1e7e6a074d4d" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.685703 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-c95cd"] Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.686480 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12a04f77-054c-4a5d-80c5-2d64fcc137af" containerName="extract-content" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.686585 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="12a04f77-054c-4a5d-80c5-2d64fcc137af" containerName="extract-content" Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.686714 4884 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" containerName="extract-utilities" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.686793 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" containerName="extract-utilities" Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.686873 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12a04f77-054c-4a5d-80c5-2d64fcc137af" containerName="extract-utilities" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.686959 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="12a04f77-054c-4a5d-80c5-2d64fcc137af" containerName="extract-utilities" Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.687045 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" containerName="registry-server" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.687120 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" containerName="registry-server" Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.687179 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" containerName="extract-utilities" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.687232 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" containerName="extract-utilities" Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.687294 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" containerName="extract-content" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.687353 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" containerName="extract-content" Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.687417 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93607314-0a98-4da3-bfc3-5514b65e6580" containerName="extract-utilities" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.687496 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93607314-0a98-4da3-bfc3-5514b65e6580" containerName="extract-utilities" Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.687554 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" containerName="marketplace-operator" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.687610 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" containerName="marketplace-operator" Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.687668 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12a04f77-054c-4a5d-80c5-2d64fcc137af" containerName="registry-server" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.687720 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="12a04f77-054c-4a5d-80c5-2d64fcc137af" containerName="registry-server" Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.687774 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" containerName="marketplace-operator" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.687912 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" containerName="marketplace-operator" Dec 10 00:36:46 crc 
kubenswrapper[4884]: E1210 00:36:46.687977 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93607314-0a98-4da3-bfc3-5514b65e6580" containerName="registry-server" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.688035 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93607314-0a98-4da3-bfc3-5514b65e6580" containerName="registry-server" Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.688096 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93607314-0a98-4da3-bfc3-5514b65e6580" containerName="extract-content" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.688150 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93607314-0a98-4da3-bfc3-5514b65e6580" containerName="extract-content" Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.688203 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" containerName="extract-content" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.688262 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" containerName="extract-content" Dec 10 00:36:46 crc kubenswrapper[4884]: E1210 00:36:46.688318 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" containerName="registry-server" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.688373 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" containerName="registry-server" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.688562 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" containerName="marketplace-operator" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.688640 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="12a04f77-054c-4a5d-80c5-2d64fcc137af" containerName="registry-server" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.688708 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="93607314-0a98-4da3-bfc3-5514b65e6580" containerName="registry-server" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.688773 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" containerName="registry-server" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.688855 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" containerName="registry-server" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.689073 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" containerName="marketplace-operator" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.704756 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.705216 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c95cd"] Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.707831 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.786246 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrxbv\" (UniqueName: \"kubernetes.io/projected/d888450e-2455-4c74-9931-699668b137a8-kube-api-access-jrxbv\") pod \"redhat-marketplace-c95cd\" (UID: \"d888450e-2455-4c74-9931-699668b137a8\") " pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.786556 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d888450e-2455-4c74-9931-699668b137a8-utilities\") pod \"redhat-marketplace-c95cd\" (UID: \"d888450e-2455-4c74-9931-699668b137a8\") " pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.786670 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d888450e-2455-4c74-9931-699668b137a8-catalog-content\") pod \"redhat-marketplace-c95cd\" (UID: \"d888450e-2455-4c74-9931-699668b137a8\") " pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.888684 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrxbv\" (UniqueName: \"kubernetes.io/projected/d888450e-2455-4c74-9931-699668b137a8-kube-api-access-jrxbv\") pod \"redhat-marketplace-c95cd\" (UID: \"d888450e-2455-4c74-9931-699668b137a8\") " pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.888729 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d888450e-2455-4c74-9931-699668b137a8-utilities\") pod \"redhat-marketplace-c95cd\" (UID: \"d888450e-2455-4c74-9931-699668b137a8\") " pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.888773 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d888450e-2455-4c74-9931-699668b137a8-catalog-content\") pod \"redhat-marketplace-c95cd\" (UID: \"d888450e-2455-4c74-9931-699668b137a8\") " pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.889210 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d888450e-2455-4c74-9931-699668b137a8-catalog-content\") pod \"redhat-marketplace-c95cd\" (UID: \"d888450e-2455-4c74-9931-699668b137a8\") " pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.889410 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d888450e-2455-4c74-9931-699668b137a8-utilities\") pod \"redhat-marketplace-c95cd\" (UID: 
\"d888450e-2455-4c74-9931-699668b137a8\") " pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.892509 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lz2kk"] Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.894069 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.896974 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.919941 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrxbv\" (UniqueName: \"kubernetes.io/projected/d888450e-2455-4c74-9931-699668b137a8-kube-api-access-jrxbv\") pod \"redhat-marketplace-c95cd\" (UID: \"d888450e-2455-4c74-9931-699668b137a8\") " pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.928567 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lz2kk"] Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.990062 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2faf949b-c390-442b-8b2c-dc394506f3d4-catalog-content\") pod \"redhat-operators-lz2kk\" (UID: \"2faf949b-c390-442b-8b2c-dc394506f3d4\") " pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.990152 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2faf949b-c390-442b-8b2c-dc394506f3d4-utilities\") pod \"redhat-operators-lz2kk\" (UID: \"2faf949b-c390-442b-8b2c-dc394506f3d4\") " pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:46 crc kubenswrapper[4884]: I1210 00:36:46.990244 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2j9z6\" (UniqueName: \"kubernetes.io/projected/2faf949b-c390-442b-8b2c-dc394506f3d4-kube-api-access-2j9z6\") pod \"redhat-operators-lz2kk\" (UID: \"2faf949b-c390-442b-8b2c-dc394506f3d4\") " pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.019033 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.093536 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2faf949b-c390-442b-8b2c-dc394506f3d4-catalog-content\") pod \"redhat-operators-lz2kk\" (UID: \"2faf949b-c390-442b-8b2c-dc394506f3d4\") " pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.094082 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2faf949b-c390-442b-8b2c-dc394506f3d4-catalog-content\") pod \"redhat-operators-lz2kk\" (UID: \"2faf949b-c390-442b-8b2c-dc394506f3d4\") " pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.094244 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2faf949b-c390-442b-8b2c-dc394506f3d4-utilities\") pod \"redhat-operators-lz2kk\" (UID: \"2faf949b-c390-442b-8b2c-dc394506f3d4\") " pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.094601 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2faf949b-c390-442b-8b2c-dc394506f3d4-utilities\") pod \"redhat-operators-lz2kk\" (UID: \"2faf949b-c390-442b-8b2c-dc394506f3d4\") " pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.094726 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2j9z6\" (UniqueName: \"kubernetes.io/projected/2faf949b-c390-442b-8b2c-dc394506f3d4-kube-api-access-2j9z6\") pod \"redhat-operators-lz2kk\" (UID: \"2faf949b-c390-442b-8b2c-dc394506f3d4\") " pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.114501 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2j9z6\" (UniqueName: \"kubernetes.io/projected/2faf949b-c390-442b-8b2c-dc394506f3d4-kube-api-access-2j9z6\") pod \"redhat-operators-lz2kk\" (UID: \"2faf949b-c390-442b-8b2c-dc394506f3d4\") " pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.247117 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.308610 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12a04f77-054c-4a5d-80c5-2d64fcc137af" path="/var/lib/kubelet/pods/12a04f77-054c-4a5d-80c5-2d64fcc137af/volumes" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.314705 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89270eb2-fb6c-41ec-bed8-ff42dc28c6fd" path="/var/lib/kubelet/pods/89270eb2-fb6c-41ec-bed8-ff42dc28c6fd/volumes" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.317734 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8eba9bb1-b07d-4d37-99f5-ddd952f9f681" path="/var/lib/kubelet/pods/8eba9bb1-b07d-4d37-99f5-ddd952f9f681/volumes" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.318997 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93607314-0a98-4da3-bfc3-5514b65e6580" path="/var/lib/kubelet/pods/93607314-0a98-4da3-bfc3-5514b65e6580/volumes" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.319646 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db8164d6-52ea-4ee2-b307-0acc3cbd72a9" path="/var/lib/kubelet/pods/db8164d6-52ea-4ee2-b307-0acc3cbd72a9/volumes" Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.451731 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lz2kk"] Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.455484 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-c95cd"] Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.878901 4884 generic.go:334] "Generic (PLEG): container finished" podID="2faf949b-c390-442b-8b2c-dc394506f3d4" containerID="5c294f422eff7b9fea2ffd1faff660dd9b7848b050ea1b49473a7e4fc9441f12" exitCode=0 Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.879004 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lz2kk" event={"ID":"2faf949b-c390-442b-8b2c-dc394506f3d4","Type":"ContainerDied","Data":"5c294f422eff7b9fea2ffd1faff660dd9b7848b050ea1b49473a7e4fc9441f12"} Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.879039 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lz2kk" event={"ID":"2faf949b-c390-442b-8b2c-dc394506f3d4","Type":"ContainerStarted","Data":"eb237bbee8a998b13212bed364bdc2386d7d51f0b5f2bc11aa3b7f3c060ad682"} Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.882849 4884 generic.go:334] "Generic (PLEG): container finished" podID="d888450e-2455-4c74-9931-699668b137a8" containerID="ed68073e5941f1a4842195480f8545858f5f60dab3969c2cbd871911d5d85c0a" exitCode=0 Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.882937 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c95cd" event={"ID":"d888450e-2455-4c74-9931-699668b137a8","Type":"ContainerDied","Data":"ed68073e5941f1a4842195480f8545858f5f60dab3969c2cbd871911d5d85c0a"} Dec 10 00:36:47 crc kubenswrapper[4884]: I1210 00:36:47.882957 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c95cd" event={"ID":"d888450e-2455-4c74-9931-699668b137a8","Type":"ContainerStarted","Data":"fece8905949d92b95d75d36b9204b307f80110c8dd5984fbbc160a49b004425c"} Dec 10 00:36:48 crc kubenswrapper[4884]: I1210 00:36:48.099071 4884 patch_prober.go:28] 
interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:36:48 crc kubenswrapper[4884]: I1210 00:36:48.099677 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:36:48 crc kubenswrapper[4884]: I1210 00:36:48.893721 4884 generic.go:334] "Generic (PLEG): container finished" podID="d888450e-2455-4c74-9931-699668b137a8" containerID="927dadec9ef89b43ab2f74f88eabd3c1a794c2dc2e6f39916d97d0d33bd644a9" exitCode=0 Dec 10 00:36:48 crc kubenswrapper[4884]: I1210 00:36:48.893774 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c95cd" event={"ID":"d888450e-2455-4c74-9931-699668b137a8","Type":"ContainerDied","Data":"927dadec9ef89b43ab2f74f88eabd3c1a794c2dc2e6f39916d97d0d33bd644a9"} Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.097107 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dgn4r"] Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.099637 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.107708 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dgn4r"] Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.141989 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.244095 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8078d872-21e8-4dfa-a907-5b3ce3759920-catalog-content\") pod \"community-operators-dgn4r\" (UID: \"8078d872-21e8-4dfa-a907-5b3ce3759920\") " pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.244184 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62kkf\" (UniqueName: \"kubernetes.io/projected/8078d872-21e8-4dfa-a907-5b3ce3759920-kube-api-access-62kkf\") pod \"community-operators-dgn4r\" (UID: \"8078d872-21e8-4dfa-a907-5b3ce3759920\") " pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.244274 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8078d872-21e8-4dfa-a907-5b3ce3759920-utilities\") pod \"community-operators-dgn4r\" (UID: \"8078d872-21e8-4dfa-a907-5b3ce3759920\") " pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.293770 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7vf7m"] Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.294921 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.299787 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.302686 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7vf7m"] Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.345797 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8078d872-21e8-4dfa-a907-5b3ce3759920-catalog-content\") pod \"community-operators-dgn4r\" (UID: \"8078d872-21e8-4dfa-a907-5b3ce3759920\") " pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.345872 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62kkf\" (UniqueName: \"kubernetes.io/projected/8078d872-21e8-4dfa-a907-5b3ce3759920-kube-api-access-62kkf\") pod \"community-operators-dgn4r\" (UID: \"8078d872-21e8-4dfa-a907-5b3ce3759920\") " pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.345913 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8078d872-21e8-4dfa-a907-5b3ce3759920-utilities\") pod \"community-operators-dgn4r\" (UID: \"8078d872-21e8-4dfa-a907-5b3ce3759920\") " pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.346520 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8078d872-21e8-4dfa-a907-5b3ce3759920-catalog-content\") pod \"community-operators-dgn4r\" (UID: \"8078d872-21e8-4dfa-a907-5b3ce3759920\") " pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.346543 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8078d872-21e8-4dfa-a907-5b3ce3759920-utilities\") pod \"community-operators-dgn4r\" (UID: \"8078d872-21e8-4dfa-a907-5b3ce3759920\") " pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.371219 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62kkf\" (UniqueName: \"kubernetes.io/projected/8078d872-21e8-4dfa-a907-5b3ce3759920-kube-api-access-62kkf\") pod \"community-operators-dgn4r\" (UID: \"8078d872-21e8-4dfa-a907-5b3ce3759920\") " pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.451729 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/218fb300-2f8e-4ba7-a947-a45ed8426678-utilities\") pod \"certified-operators-7vf7m\" (UID: \"218fb300-2f8e-4ba7-a947-a45ed8426678\") " pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.451914 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/218fb300-2f8e-4ba7-a947-a45ed8426678-catalog-content\") pod \"certified-operators-7vf7m\" (UID: 
\"218fb300-2f8e-4ba7-a947-a45ed8426678\") " pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.451966 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qn974\" (UniqueName: \"kubernetes.io/projected/218fb300-2f8e-4ba7-a947-a45ed8426678-kube-api-access-qn974\") pod \"certified-operators-7vf7m\" (UID: \"218fb300-2f8e-4ba7-a947-a45ed8426678\") " pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.459525 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.552897 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/218fb300-2f8e-4ba7-a947-a45ed8426678-utilities\") pod \"certified-operators-7vf7m\" (UID: \"218fb300-2f8e-4ba7-a947-a45ed8426678\") " pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.552983 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/218fb300-2f8e-4ba7-a947-a45ed8426678-catalog-content\") pod \"certified-operators-7vf7m\" (UID: \"218fb300-2f8e-4ba7-a947-a45ed8426678\") " pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.553012 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qn974\" (UniqueName: \"kubernetes.io/projected/218fb300-2f8e-4ba7-a947-a45ed8426678-kube-api-access-qn974\") pod \"certified-operators-7vf7m\" (UID: \"218fb300-2f8e-4ba7-a947-a45ed8426678\") " pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.557108 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/218fb300-2f8e-4ba7-a947-a45ed8426678-catalog-content\") pod \"certified-operators-7vf7m\" (UID: \"218fb300-2f8e-4ba7-a947-a45ed8426678\") " pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.557161 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/218fb300-2f8e-4ba7-a947-a45ed8426678-utilities\") pod \"certified-operators-7vf7m\" (UID: \"218fb300-2f8e-4ba7-a947-a45ed8426678\") " pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.577124 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qn974\" (UniqueName: \"kubernetes.io/projected/218fb300-2f8e-4ba7-a947-a45ed8426678-kube-api-access-qn974\") pod \"certified-operators-7vf7m\" (UID: \"218fb300-2f8e-4ba7-a947-a45ed8426678\") " pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.610386 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.834858 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7vf7m"] Dec 10 00:36:49 crc kubenswrapper[4884]: W1210 00:36:49.838749 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod218fb300_2f8e_4ba7_a947_a45ed8426678.slice/crio-dc9f8f5d7e876e1c29259e8517e84469c9ec730fa943bcded38cda40c4f79d26 WatchSource:0}: Error finding container dc9f8f5d7e876e1c29259e8517e84469c9ec730fa943bcded38cda40c4f79d26: Status 404 returned error can't find the container with id dc9f8f5d7e876e1c29259e8517e84469c9ec730fa943bcded38cda40c4f79d26 Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.900394 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7vf7m" event={"ID":"218fb300-2f8e-4ba7-a947-a45ed8426678","Type":"ContainerStarted","Data":"dc9f8f5d7e876e1c29259e8517e84469c9ec730fa943bcded38cda40c4f79d26"} Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.905190 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-c95cd" event={"ID":"d888450e-2455-4c74-9931-699668b137a8","Type":"ContainerStarted","Data":"44a7a432a38677091c2dc391b0e2f5cc1cab57141adf755d06e3f5ca9bcadc39"} Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.907169 4884 generic.go:334] "Generic (PLEG): container finished" podID="2faf949b-c390-442b-8b2c-dc394506f3d4" containerID="f560ecfed8fc068c415a32934609d6c19c093aa29d505843af1ada487d9e3d51" exitCode=0 Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.907212 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lz2kk" event={"ID":"2faf949b-c390-442b-8b2c-dc394506f3d4","Type":"ContainerDied","Data":"f560ecfed8fc068c415a32934609d6c19c093aa29d505843af1ada487d9e3d51"} Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.910397 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dgn4r"] Dec 10 00:36:49 crc kubenswrapper[4884]: W1210 00:36:49.920626 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8078d872_21e8_4dfa_a907_5b3ce3759920.slice/crio-e26f9cc10c04d74cd2b0dd234f2b9bf9c32267fcfdf06567cbb59890065bdc26 WatchSource:0}: Error finding container e26f9cc10c04d74cd2b0dd234f2b9bf9c32267fcfdf06567cbb59890065bdc26: Status 404 returned error can't find the container with id e26f9cc10c04d74cd2b0dd234f2b9bf9c32267fcfdf06567cbb59890065bdc26 Dec 10 00:36:49 crc kubenswrapper[4884]: I1210 00:36:49.927851 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-c95cd" podStartSLOduration=2.293045502 podStartE2EDuration="3.927779636s" podCreationTimestamp="2025-12-10 00:36:46 +0000 UTC" firstStartedPulling="2025-12-10 00:36:47.884141814 +0000 UTC m=+380.962098931" lastFinishedPulling="2025-12-10 00:36:49.518875958 +0000 UTC m=+382.596833065" observedRunningTime="2025-12-10 00:36:49.923111207 +0000 UTC m=+383.001068324" watchObservedRunningTime="2025-12-10 00:36:49.927779636 +0000 UTC m=+383.005736753" Dec 10 00:36:50 crc kubenswrapper[4884]: I1210 00:36:50.915718 4884 generic.go:334] "Generic (PLEG): container finished" podID="218fb300-2f8e-4ba7-a947-a45ed8426678" 
containerID="31a47d4aeb33d7ae69b0709f7d365525025b568d414ee06cfda49d6247b16da0" exitCode=0 Dec 10 00:36:50 crc kubenswrapper[4884]: I1210 00:36:50.916130 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7vf7m" event={"ID":"218fb300-2f8e-4ba7-a947-a45ed8426678","Type":"ContainerDied","Data":"31a47d4aeb33d7ae69b0709f7d365525025b568d414ee06cfda49d6247b16da0"} Dec 10 00:36:50 crc kubenswrapper[4884]: I1210 00:36:50.923032 4884 generic.go:334] "Generic (PLEG): container finished" podID="8078d872-21e8-4dfa-a907-5b3ce3759920" containerID="57f270c5e11e72cb6c57882f74efe61073adbb1f920145be29d7adb6845b8617" exitCode=0 Dec 10 00:36:50 crc kubenswrapper[4884]: I1210 00:36:50.923633 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dgn4r" event={"ID":"8078d872-21e8-4dfa-a907-5b3ce3759920","Type":"ContainerDied","Data":"57f270c5e11e72cb6c57882f74efe61073adbb1f920145be29d7adb6845b8617"} Dec 10 00:36:50 crc kubenswrapper[4884]: I1210 00:36:50.923668 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dgn4r" event={"ID":"8078d872-21e8-4dfa-a907-5b3ce3759920","Type":"ContainerStarted","Data":"e26f9cc10c04d74cd2b0dd234f2b9bf9c32267fcfdf06567cbb59890065bdc26"} Dec 10 00:36:51 crc kubenswrapper[4884]: I1210 00:36:51.949754 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7vf7m" event={"ID":"218fb300-2f8e-4ba7-a947-a45ed8426678","Type":"ContainerStarted","Data":"b42b82b0c500798a88adee3ae07348a1330dfb697e5e4e5a4a5d671dfb4f8894"} Dec 10 00:36:51 crc kubenswrapper[4884]: I1210 00:36:51.972151 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lz2kk" event={"ID":"2faf949b-c390-442b-8b2c-dc394506f3d4","Type":"ContainerStarted","Data":"5bc54d82157d9cba42e5c1bebe31320c69b0cec1f718cd0630e3a4597994ec08"} Dec 10 00:36:51 crc kubenswrapper[4884]: I1210 00:36:51.996020 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lz2kk" podStartSLOduration=2.402598665 podStartE2EDuration="5.995998044s" podCreationTimestamp="2025-12-10 00:36:46 +0000 UTC" firstStartedPulling="2025-12-10 00:36:47.882833297 +0000 UTC m=+380.960790414" lastFinishedPulling="2025-12-10 00:36:51.476232676 +0000 UTC m=+384.554189793" observedRunningTime="2025-12-10 00:36:51.991057199 +0000 UTC m=+385.069014316" watchObservedRunningTime="2025-12-10 00:36:51.995998044 +0000 UTC m=+385.073955161" Dec 10 00:36:52 crc kubenswrapper[4884]: I1210 00:36:52.979453 4884 generic.go:334] "Generic (PLEG): container finished" podID="218fb300-2f8e-4ba7-a947-a45ed8426678" containerID="b42b82b0c500798a88adee3ae07348a1330dfb697e5e4e5a4a5d671dfb4f8894" exitCode=0 Dec 10 00:36:52 crc kubenswrapper[4884]: I1210 00:36:52.979508 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7vf7m" event={"ID":"218fb300-2f8e-4ba7-a947-a45ed8426678","Type":"ContainerDied","Data":"b42b82b0c500798a88adee3ae07348a1330dfb697e5e4e5a4a5d671dfb4f8894"} Dec 10 00:36:52 crc kubenswrapper[4884]: I1210 00:36:52.982614 4884 generic.go:334] "Generic (PLEG): container finished" podID="8078d872-21e8-4dfa-a907-5b3ce3759920" containerID="d1a493e23bf340e3bc2c7bc33f15c9444b49605791a83d8f235ab16172f3ffdf" exitCode=0 Dec 10 00:36:52 crc kubenswrapper[4884]: I1210 00:36:52.982674 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-dgn4r" event={"ID":"8078d872-21e8-4dfa-a907-5b3ce3759920","Type":"ContainerDied","Data":"d1a493e23bf340e3bc2c7bc33f15c9444b49605791a83d8f235ab16172f3ffdf"} Dec 10 00:36:55 crc kubenswrapper[4884]: I1210 00:36:54.999550 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dgn4r" event={"ID":"8078d872-21e8-4dfa-a907-5b3ce3759920","Type":"ContainerStarted","Data":"4a516347e5ca0a2ac822ca8d74260b0a399b34ea1ef69ec98fd72a3b8f0f8ed9"} Dec 10 00:36:55 crc kubenswrapper[4884]: I1210 00:36:55.015550 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7vf7m" event={"ID":"218fb300-2f8e-4ba7-a947-a45ed8426678","Type":"ContainerStarted","Data":"24b4e8e3b147534e6905e54e0d2fb21035889a85d2a29dbfd60940fb7e608139"} Dec 10 00:36:55 crc kubenswrapper[4884]: I1210 00:36:55.027020 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dgn4r" podStartSLOduration=3.476995537 podStartE2EDuration="6.02700041s" podCreationTimestamp="2025-12-10 00:36:49 +0000 UTC" firstStartedPulling="2025-12-10 00:36:50.925811412 +0000 UTC m=+384.003768539" lastFinishedPulling="2025-12-10 00:36:53.475816255 +0000 UTC m=+386.553773412" observedRunningTime="2025-12-10 00:36:55.026503216 +0000 UTC m=+388.104460353" watchObservedRunningTime="2025-12-10 00:36:55.02700041 +0000 UTC m=+388.104957527" Dec 10 00:36:57 crc kubenswrapper[4884]: I1210 00:36:57.019642 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:57 crc kubenswrapper[4884]: I1210 00:36:57.019704 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:57 crc kubenswrapper[4884]: I1210 00:36:57.065454 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:57 crc kubenswrapper[4884]: I1210 00:36:57.087826 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7vf7m" podStartSLOduration=5.494554431 podStartE2EDuration="8.087807695s" podCreationTimestamp="2025-12-10 00:36:49 +0000 UTC" firstStartedPulling="2025-12-10 00:36:50.922931412 +0000 UTC m=+384.000888549" lastFinishedPulling="2025-12-10 00:36:53.516184696 +0000 UTC m=+386.594141813" observedRunningTime="2025-12-10 00:36:55.050457046 +0000 UTC m=+388.128414173" watchObservedRunningTime="2025-12-10 00:36:57.087807695 +0000 UTC m=+390.165764812" Dec 10 00:36:57 crc kubenswrapper[4884]: I1210 00:36:57.248409 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:57 crc kubenswrapper[4884]: I1210 00:36:57.248522 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:36:58 crc kubenswrapper[4884]: I1210 00:36:58.101105 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-c95cd" Dec 10 00:36:58 crc kubenswrapper[4884]: I1210 00:36:58.294544 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lz2kk" podUID="2faf949b-c390-442b-8b2c-dc394506f3d4" containerName="registry-server" probeResult="failure" output=< Dec 10 00:36:58 crc 
kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 00:36:58 crc kubenswrapper[4884]: > Dec 10 00:36:59 crc kubenswrapper[4884]: I1210 00:36:59.460172 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:59 crc kubenswrapper[4884]: I1210 00:36:59.460244 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:59 crc kubenswrapper[4884]: I1210 00:36:59.523639 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:36:59 crc kubenswrapper[4884]: I1210 00:36:59.610994 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:59 crc kubenswrapper[4884]: I1210 00:36:59.611053 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:36:59 crc kubenswrapper[4884]: I1210 00:36:59.663963 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:37:00 crc kubenswrapper[4884]: I1210 00:37:00.094492 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:37:00 crc kubenswrapper[4884]: I1210 00:37:00.106161 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7vf7m" Dec 10 00:37:00 crc kubenswrapper[4884]: I1210 00:37:00.973546 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-tpkdc" Dec 10 00:37:01 crc kubenswrapper[4884]: I1210 00:37:01.037597 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-pt6z4"] Dec 10 00:37:07 crc kubenswrapper[4884]: I1210 00:37:07.304501 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:37:07 crc kubenswrapper[4884]: I1210 00:37:07.367649 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lz2kk" Dec 10 00:37:18 crc kubenswrapper[4884]: I1210 00:37:18.098360 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:37:18 crc kubenswrapper[4884]: I1210 00:37:18.098976 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.082507 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" podUID="40fa21aa-487a-46a9-a396-25ba52971640" containerName="registry" containerID="cri-o://6cf153efd5ff451c3bb34f6a9b9aa59db77830ee04761b375242a008d0c8ed72" gracePeriod=30 Dec 10 00:37:26 crc 
kubenswrapper[4884]: I1210 00:37:26.219931 4884 generic.go:334] "Generic (PLEG): container finished" podID="40fa21aa-487a-46a9-a396-25ba52971640" containerID="6cf153efd5ff451c3bb34f6a9b9aa59db77830ee04761b375242a008d0c8ed72" exitCode=0 Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.219987 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" event={"ID":"40fa21aa-487a-46a9-a396-25ba52971640","Type":"ContainerDied","Data":"6cf153efd5ff451c3bb34f6a9b9aa59db77830ee04761b375242a008d0c8ed72"} Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.498954 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.549771 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-bound-sa-token\") pod \"40fa21aa-487a-46a9-a396-25ba52971640\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.549852 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/40fa21aa-487a-46a9-a396-25ba52971640-registry-certificates\") pod \"40fa21aa-487a-46a9-a396-25ba52971640\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.549923 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-registry-tls\") pod \"40fa21aa-487a-46a9-a396-25ba52971640\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.550185 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"40fa21aa-487a-46a9-a396-25ba52971640\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.550248 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/40fa21aa-487a-46a9-a396-25ba52971640-installation-pull-secrets\") pod \"40fa21aa-487a-46a9-a396-25ba52971640\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.550320 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zb8b\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-kube-api-access-6zb8b\") pod \"40fa21aa-487a-46a9-a396-25ba52971640\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.550394 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/40fa21aa-487a-46a9-a396-25ba52971640-trusted-ca\") pod \"40fa21aa-487a-46a9-a396-25ba52971640\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.550460 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/40fa21aa-487a-46a9-a396-25ba52971640-ca-trust-extracted\") pod \"40fa21aa-487a-46a9-a396-25ba52971640\" (UID: \"40fa21aa-487a-46a9-a396-25ba52971640\") " Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.552614 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40fa21aa-487a-46a9-a396-25ba52971640-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "40fa21aa-487a-46a9-a396-25ba52971640" (UID: "40fa21aa-487a-46a9-a396-25ba52971640"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.552798 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40fa21aa-487a-46a9-a396-25ba52971640-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "40fa21aa-487a-46a9-a396-25ba52971640" (UID: "40fa21aa-487a-46a9-a396-25ba52971640"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.555192 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/40fa21aa-487a-46a9-a396-25ba52971640-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.555216 4884 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/40fa21aa-487a-46a9-a396-25ba52971640-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.557278 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40fa21aa-487a-46a9-a396-25ba52971640-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "40fa21aa-487a-46a9-a396-25ba52971640" (UID: "40fa21aa-487a-46a9-a396-25ba52971640"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.557398 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "40fa21aa-487a-46a9-a396-25ba52971640" (UID: "40fa21aa-487a-46a9-a396-25ba52971640"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.558166 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "40fa21aa-487a-46a9-a396-25ba52971640" (UID: "40fa21aa-487a-46a9-a396-25ba52971640"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.558233 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-kube-api-access-6zb8b" (OuterVolumeSpecName: "kube-api-access-6zb8b") pod "40fa21aa-487a-46a9-a396-25ba52971640" (UID: "40fa21aa-487a-46a9-a396-25ba52971640"). InnerVolumeSpecName "kube-api-access-6zb8b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.568133 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "40fa21aa-487a-46a9-a396-25ba52971640" (UID: "40fa21aa-487a-46a9-a396-25ba52971640"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.578881 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/40fa21aa-487a-46a9-a396-25ba52971640-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "40fa21aa-487a-46a9-a396-25ba52971640" (UID: "40fa21aa-487a-46a9-a396-25ba52971640"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.656601 4884 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/40fa21aa-487a-46a9-a396-25ba52971640-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.656885 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zb8b\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-kube-api-access-6zb8b\") on node \"crc\" DevicePath \"\"" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.656956 4884 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/40fa21aa-487a-46a9-a396-25ba52971640-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.657064 4884 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 10 00:37:26 crc kubenswrapper[4884]: I1210 00:37:26.657126 4884 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/40fa21aa-487a-46a9-a396-25ba52971640-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 10 00:37:27 crc kubenswrapper[4884]: I1210 00:37:27.236562 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" event={"ID":"40fa21aa-487a-46a9-a396-25ba52971640","Type":"ContainerDied","Data":"ea70a7ecefade4ec6f517f8e7a40aca5cc675bdbe35f109daad57ca710cefca0"} Dec 10 00:37:27 crc kubenswrapper[4884]: I1210 00:37:27.236626 4884 scope.go:117] "RemoveContainer" containerID="6cf153efd5ff451c3bb34f6a9b9aa59db77830ee04761b375242a008d0c8ed72" Dec 10 00:37:27 crc kubenswrapper[4884]: I1210 00:37:27.236680 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-pt6z4" Dec 10 00:37:27 crc kubenswrapper[4884]: I1210 00:37:27.297709 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-pt6z4"] Dec 10 00:37:27 crc kubenswrapper[4884]: I1210 00:37:27.300010 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-pt6z4"] Dec 10 00:37:29 crc kubenswrapper[4884]: I1210 00:37:29.299500 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40fa21aa-487a-46a9-a396-25ba52971640" path="/var/lib/kubelet/pods/40fa21aa-487a-46a9-a396-25ba52971640/volumes" Dec 10 00:37:48 crc kubenswrapper[4884]: I1210 00:37:48.098181 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:37:48 crc kubenswrapper[4884]: I1210 00:37:48.098757 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:37:48 crc kubenswrapper[4884]: I1210 00:37:48.098815 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:37:48 crc kubenswrapper[4884]: I1210 00:37:48.099644 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ecf042e581c7c61981c2950a871cf1332176d990f8fd754c1ccf327fef18a9db"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 00:37:48 crc kubenswrapper[4884]: I1210 00:37:48.099722 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://ecf042e581c7c61981c2950a871cf1332176d990f8fd754c1ccf327fef18a9db" gracePeriod=600 Dec 10 00:37:48 crc kubenswrapper[4884]: I1210 00:37:48.373823 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="ecf042e581c7c61981c2950a871cf1332176d990f8fd754c1ccf327fef18a9db" exitCode=0 Dec 10 00:37:48 crc kubenswrapper[4884]: I1210 00:37:48.373887 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"ecf042e581c7c61981c2950a871cf1332176d990f8fd754c1ccf327fef18a9db"} Dec 10 00:37:48 crc kubenswrapper[4884]: I1210 00:37:48.373933 4884 scope.go:117] "RemoveContainer" containerID="d40757a26a946a7173b62e65909078b97eeacacc342cf2ea824ab11f906cf9e7" Dec 10 00:37:49 crc kubenswrapper[4884]: I1210 00:37:49.383420 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" 
event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"279135d8bce1538b9c608fe9ea1546482ee6e7d7fb759350407d1512aa1756a8"} Dec 10 00:39:48 crc kubenswrapper[4884]: I1210 00:39:48.099031 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:39:48 crc kubenswrapper[4884]: I1210 00:39:48.099759 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:40:18 crc kubenswrapper[4884]: I1210 00:40:18.098895 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:40:18 crc kubenswrapper[4884]: I1210 00:40:18.099726 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:40:48 crc kubenswrapper[4884]: I1210 00:40:48.097806 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:40:48 crc kubenswrapper[4884]: I1210 00:40:48.098286 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:40:48 crc kubenswrapper[4884]: I1210 00:40:48.098360 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:40:48 crc kubenswrapper[4884]: I1210 00:40:48.098961 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"279135d8bce1538b9c608fe9ea1546482ee6e7d7fb759350407d1512aa1756a8"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 00:40:48 crc kubenswrapper[4884]: I1210 00:40:48.099082 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://279135d8bce1538b9c608fe9ea1546482ee6e7d7fb759350407d1512aa1756a8" gracePeriod=600 Dec 10 00:40:48 crc kubenswrapper[4884]: I1210 00:40:48.653139 4884 generic.go:334] "Generic (PLEG): container finished" 
podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="279135d8bce1538b9c608fe9ea1546482ee6e7d7fb759350407d1512aa1756a8" exitCode=0 Dec 10 00:40:48 crc kubenswrapper[4884]: I1210 00:40:48.653239 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"279135d8bce1538b9c608fe9ea1546482ee6e7d7fb759350407d1512aa1756a8"} Dec 10 00:40:48 crc kubenswrapper[4884]: I1210 00:40:48.653422 4884 scope.go:117] "RemoveContainer" containerID="ecf042e581c7c61981c2950a871cf1332176d990f8fd754c1ccf327fef18a9db" Dec 10 00:40:49 crc kubenswrapper[4884]: I1210 00:40:49.663570 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"1c531d5171ce3be4c53ba6e890eb2f889d87559fb1212cb5a42702e23d9e930b"} Dec 10 00:42:48 crc kubenswrapper[4884]: I1210 00:42:48.098671 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:42:48 crc kubenswrapper[4884]: I1210 00:42:48.099119 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:43:09 crc kubenswrapper[4884]: I1210 00:43:09.152671 4884 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 10 00:43:18 crc kubenswrapper[4884]: I1210 00:43:18.098809 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:43:18 crc kubenswrapper[4884]: I1210 00:43:18.099605 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:43:48 crc kubenswrapper[4884]: I1210 00:43:48.098668 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:43:48 crc kubenswrapper[4884]: I1210 00:43:48.100400 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:43:48 crc kubenswrapper[4884]: I1210 00:43:48.100540 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:43:48 crc kubenswrapper[4884]: I1210 00:43:48.101096 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1c531d5171ce3be4c53ba6e890eb2f889d87559fb1212cb5a42702e23d9e930b"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 00:43:48 crc kubenswrapper[4884]: I1210 00:43:48.101238 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://1c531d5171ce3be4c53ba6e890eb2f889d87559fb1212cb5a42702e23d9e930b" gracePeriod=600 Dec 10 00:43:48 crc kubenswrapper[4884]: I1210 00:43:48.873739 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="1c531d5171ce3be4c53ba6e890eb2f889d87559fb1212cb5a42702e23d9e930b" exitCode=0 Dec 10 00:43:48 crc kubenswrapper[4884]: I1210 00:43:48.873793 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"1c531d5171ce3be4c53ba6e890eb2f889d87559fb1212cb5a42702e23d9e930b"} Dec 10 00:43:48 crc kubenswrapper[4884]: I1210 00:43:48.874082 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"fb549c86290e5eaaf8d0b787f8b183ae212b7154cabf782092b91c5f95b4ef4c"} Dec 10 00:43:48 crc kubenswrapper[4884]: I1210 00:43:48.874102 4884 scope.go:117] "RemoveContainer" containerID="279135d8bce1538b9c608fe9ea1546482ee6e7d7fb759350407d1512aa1756a8" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.110774 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld"] Dec 10 00:44:00 crc kubenswrapper[4884]: E1210 00:44:00.111459 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40fa21aa-487a-46a9-a396-25ba52971640" containerName="registry" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.111470 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="40fa21aa-487a-46a9-a396-25ba52971640" containerName="registry" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.111561 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="40fa21aa-487a-46a9-a396-25ba52971640" containerName="registry" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.112179 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.118088 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.125293 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld"] Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.205910 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bf8r4\" (UniqueName: \"kubernetes.io/projected/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-kube-api-access-bf8r4\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld\" (UID: \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.206104 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld\" (UID: \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.206166 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld\" (UID: \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.308199 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld\" (UID: \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.308267 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld\" (UID: \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.308342 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bf8r4\" (UniqueName: \"kubernetes.io/projected/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-kube-api-access-bf8r4\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld\" (UID: \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.309085 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld\" (UID: \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.309299 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld\" (UID: \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.335475 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bf8r4\" (UniqueName: \"kubernetes.io/projected/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-kube-api-access-bf8r4\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld\" (UID: \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.429663 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.608390 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld"] Dec 10 00:44:00 crc kubenswrapper[4884]: W1210 00:44:00.614376 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d1f1281_4b1e_4b2f_988d_c0a3a4045592.slice/crio-c24bba3d772a10fbaee0b040e104a29a39602892e7f206de7935dd07991d5014 WatchSource:0}: Error finding container c24bba3d772a10fbaee0b040e104a29a39602892e7f206de7935dd07991d5014: Status 404 returned error can't find the container with id c24bba3d772a10fbaee0b040e104a29a39602892e7f206de7935dd07991d5014 Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.961326 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" event={"ID":"7d1f1281-4b1e-4b2f-988d-c0a3a4045592","Type":"ContainerStarted","Data":"c731c711ff0cf6061879548ca6369269a7689f408fb0789aad1822d8e36bcb41"} Dec 10 00:44:00 crc kubenswrapper[4884]: I1210 00:44:00.961764 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" event={"ID":"7d1f1281-4b1e-4b2f-988d-c0a3a4045592","Type":"ContainerStarted","Data":"c24bba3d772a10fbaee0b040e104a29a39602892e7f206de7935dd07991d5014"} Dec 10 00:44:01 crc kubenswrapper[4884]: I1210 00:44:01.977703 4884 generic.go:334] "Generic (PLEG): container finished" podID="7d1f1281-4b1e-4b2f-988d-c0a3a4045592" containerID="c731c711ff0cf6061879548ca6369269a7689f408fb0789aad1822d8e36bcb41" exitCode=0 Dec 10 00:44:01 crc kubenswrapper[4884]: I1210 00:44:01.977903 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" event={"ID":"7d1f1281-4b1e-4b2f-988d-c0a3a4045592","Type":"ContainerDied","Data":"c731c711ff0cf6061879548ca6369269a7689f408fb0789aad1822d8e36bcb41"} Dec 10 00:44:01 crc 
kubenswrapper[4884]: I1210 00:44:01.980489 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.448793 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xzh8t"] Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.450050 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.484453 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xzh8t"] Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.538733 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wv6t\" (UniqueName: \"kubernetes.io/projected/9cff36d7-7f16-4fed-b966-9b4eb28d296b-kube-api-access-6wv6t\") pod \"redhat-operators-xzh8t\" (UID: \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\") " pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.538813 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cff36d7-7f16-4fed-b966-9b4eb28d296b-catalog-content\") pod \"redhat-operators-xzh8t\" (UID: \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\") " pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.538950 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cff36d7-7f16-4fed-b966-9b4eb28d296b-utilities\") pod \"redhat-operators-xzh8t\" (UID: \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\") " pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.640843 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wv6t\" (UniqueName: \"kubernetes.io/projected/9cff36d7-7f16-4fed-b966-9b4eb28d296b-kube-api-access-6wv6t\") pod \"redhat-operators-xzh8t\" (UID: \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\") " pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.640926 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cff36d7-7f16-4fed-b966-9b4eb28d296b-catalog-content\") pod \"redhat-operators-xzh8t\" (UID: \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\") " pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.640951 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cff36d7-7f16-4fed-b966-9b4eb28d296b-utilities\") pod \"redhat-operators-xzh8t\" (UID: \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\") " pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.641600 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cff36d7-7f16-4fed-b966-9b4eb28d296b-utilities\") pod \"redhat-operators-xzh8t\" (UID: \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\") " pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.641594 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cff36d7-7f16-4fed-b966-9b4eb28d296b-catalog-content\") pod \"redhat-operators-xzh8t\" (UID: \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\") " pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.677963 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wv6t\" (UniqueName: \"kubernetes.io/projected/9cff36d7-7f16-4fed-b966-9b4eb28d296b-kube-api-access-6wv6t\") pod \"redhat-operators-xzh8t\" (UID: \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\") " pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.772706 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.970218 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xzh8t"] Dec 10 00:44:02 crc kubenswrapper[4884]: I1210 00:44:02.987387 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzh8t" event={"ID":"9cff36d7-7f16-4fed-b966-9b4eb28d296b","Type":"ContainerStarted","Data":"2cd0472aea12528dc59e696155e256b207638b3e47e27941652620965b91cd09"} Dec 10 00:44:03 crc kubenswrapper[4884]: I1210 00:44:03.995081 4884 generic.go:334] "Generic (PLEG): container finished" podID="7d1f1281-4b1e-4b2f-988d-c0a3a4045592" containerID="1a4493173081bcb42b7b0499ec3794966b9c1c9bcf840a686afa73bc0c672a54" exitCode=0 Dec 10 00:44:03 crc kubenswrapper[4884]: I1210 00:44:03.995210 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" event={"ID":"7d1f1281-4b1e-4b2f-988d-c0a3a4045592","Type":"ContainerDied","Data":"1a4493173081bcb42b7b0499ec3794966b9c1c9bcf840a686afa73bc0c672a54"} Dec 10 00:44:03 crc kubenswrapper[4884]: I1210 00:44:03.996873 4884 generic.go:334] "Generic (PLEG): container finished" podID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" containerID="75ca3bde973bf525051dcad83ff053a85742391bb9c4e860272d35125ea64aba" exitCode=0 Dec 10 00:44:03 crc kubenswrapper[4884]: I1210 00:44:03.996912 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzh8t" event={"ID":"9cff36d7-7f16-4fed-b966-9b4eb28d296b","Type":"ContainerDied","Data":"75ca3bde973bf525051dcad83ff053a85742391bb9c4e860272d35125ea64aba"} Dec 10 00:44:05 crc kubenswrapper[4884]: I1210 00:44:05.011302 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzh8t" event={"ID":"9cff36d7-7f16-4fed-b966-9b4eb28d296b","Type":"ContainerStarted","Data":"e01e1c597de535c0d14dc0706eb1ddcec3c228b607f2eb59c6b6ed6d5ff1fbb8"} Dec 10 00:44:05 crc kubenswrapper[4884]: I1210 00:44:05.017760 4884 generic.go:334] "Generic (PLEG): container finished" podID="7d1f1281-4b1e-4b2f-988d-c0a3a4045592" containerID="87456a6d0ed1cf6fa04ebead50872a709bd7e8829816c78e7402c7ecc32ea1ed" exitCode=0 Dec 10 00:44:05 crc kubenswrapper[4884]: I1210 00:44:05.017808 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" event={"ID":"7d1f1281-4b1e-4b2f-988d-c0a3a4045592","Type":"ContainerDied","Data":"87456a6d0ed1cf6fa04ebead50872a709bd7e8829816c78e7402c7ecc32ea1ed"} Dec 10 00:44:06 crc kubenswrapper[4884]: I1210 00:44:06.035957 
4884 generic.go:334] "Generic (PLEG): container finished" podID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" containerID="e01e1c597de535c0d14dc0706eb1ddcec3c228b607f2eb59c6b6ed6d5ff1fbb8" exitCode=0 Dec 10 00:44:06 crc kubenswrapper[4884]: I1210 00:44:06.036103 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzh8t" event={"ID":"9cff36d7-7f16-4fed-b966-9b4eb28d296b","Type":"ContainerDied","Data":"e01e1c597de535c0d14dc0706eb1ddcec3c228b607f2eb59c6b6ed6d5ff1fbb8"} Dec 10 00:44:06 crc kubenswrapper[4884]: I1210 00:44:06.246505 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:06 crc kubenswrapper[4884]: I1210 00:44:06.291349 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-bundle\") pod \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\" (UID: \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\") " Dec 10 00:44:06 crc kubenswrapper[4884]: I1210 00:44:06.291397 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-util\") pod \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\" (UID: \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\") " Dec 10 00:44:06 crc kubenswrapper[4884]: I1210 00:44:06.291461 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf8r4\" (UniqueName: \"kubernetes.io/projected/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-kube-api-access-bf8r4\") pod \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\" (UID: \"7d1f1281-4b1e-4b2f-988d-c0a3a4045592\") " Dec 10 00:44:06 crc kubenswrapper[4884]: I1210 00:44:06.293207 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-bundle" (OuterVolumeSpecName: "bundle") pod "7d1f1281-4b1e-4b2f-988d-c0a3a4045592" (UID: "7d1f1281-4b1e-4b2f-988d-c0a3a4045592"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:44:06 crc kubenswrapper[4884]: I1210 00:44:06.296190 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-kube-api-access-bf8r4" (OuterVolumeSpecName: "kube-api-access-bf8r4") pod "7d1f1281-4b1e-4b2f-988d-c0a3a4045592" (UID: "7d1f1281-4b1e-4b2f-988d-c0a3a4045592"). InnerVolumeSpecName "kube-api-access-bf8r4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:44:06 crc kubenswrapper[4884]: I1210 00:44:06.387248 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-util" (OuterVolumeSpecName: "util") pod "7d1f1281-4b1e-4b2f-988d-c0a3a4045592" (UID: "7d1f1281-4b1e-4b2f-988d-c0a3a4045592"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:44:06 crc kubenswrapper[4884]: I1210 00:44:06.394413 4884 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:44:06 crc kubenswrapper[4884]: I1210 00:44:06.394465 4884 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-util\") on node \"crc\" DevicePath \"\"" Dec 10 00:44:06 crc kubenswrapper[4884]: I1210 00:44:06.394479 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf8r4\" (UniqueName: \"kubernetes.io/projected/7d1f1281-4b1e-4b2f-988d-c0a3a4045592-kube-api-access-bf8r4\") on node \"crc\" DevicePath \"\"" Dec 10 00:44:07 crc kubenswrapper[4884]: I1210 00:44:07.043024 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" event={"ID":"7d1f1281-4b1e-4b2f-988d-c0a3a4045592","Type":"ContainerDied","Data":"c24bba3d772a10fbaee0b040e104a29a39602892e7f206de7935dd07991d5014"} Dec 10 00:44:07 crc kubenswrapper[4884]: I1210 00:44:07.043344 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c24bba3d772a10fbaee0b040e104a29a39602892e7f206de7935dd07991d5014" Dec 10 00:44:07 crc kubenswrapper[4884]: I1210 00:44:07.043426 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld" Dec 10 00:44:08 crc kubenswrapper[4884]: I1210 00:44:08.053378 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzh8t" event={"ID":"9cff36d7-7f16-4fed-b966-9b4eb28d296b","Type":"ContainerStarted","Data":"60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb"} Dec 10 00:44:08 crc kubenswrapper[4884]: I1210 00:44:08.073528 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xzh8t" podStartSLOduration=2.510509462 podStartE2EDuration="6.073505585s" podCreationTimestamp="2025-12-10 00:44:02 +0000 UTC" firstStartedPulling="2025-12-10 00:44:03.997622279 +0000 UTC m=+817.075579396" lastFinishedPulling="2025-12-10 00:44:07.560618362 +0000 UTC m=+820.638575519" observedRunningTime="2025-12-10 00:44:08.069094323 +0000 UTC m=+821.147051480" watchObservedRunningTime="2025-12-10 00:44:08.073505585 +0000 UTC m=+821.151462732" Dec 10 00:44:11 crc kubenswrapper[4884]: I1210 00:44:11.103277 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-g8w62"] Dec 10 00:44:11 crc kubenswrapper[4884]: I1210 00:44:11.104445 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovn-controller" containerID="cri-o://0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4" gracePeriod=30 Dec 10 00:44:11 crc kubenswrapper[4884]: I1210 00:44:11.104551 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="sbdb" containerID="cri-o://efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232" gracePeriod=30 Dec 10 00:44:11 crc kubenswrapper[4884]: I1210 
00:44:11.104606 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovn-acl-logging" containerID="cri-o://851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383" gracePeriod=30 Dec 10 00:44:11 crc kubenswrapper[4884]: I1210 00:44:11.104637 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="northd" containerID="cri-o://c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12" gracePeriod=30 Dec 10 00:44:11 crc kubenswrapper[4884]: I1210 00:44:11.104657 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="kube-rbac-proxy-node" containerID="cri-o://b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a" gracePeriod=30 Dec 10 00:44:11 crc kubenswrapper[4884]: I1210 00:44:11.104509 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="nbdb" containerID="cri-o://4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540" gracePeriod=30 Dec 10 00:44:11 crc kubenswrapper[4884]: I1210 00:44:11.104565 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b" gracePeriod=30 Dec 10 00:44:11 crc kubenswrapper[4884]: I1210 00:44:11.152620 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller" containerID="cri-o://25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5" gracePeriod=30 Dec 10 00:44:12 crc kubenswrapper[4884]: I1210 00:44:12.774026 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:12 crc kubenswrapper[4884]: I1210 00:44:12.774534 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.083779 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovnkube-controller/3.log" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.098674 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovn-acl-logging/0.log" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.099748 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovn-controller/0.log" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.100805 4884 generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5" exitCode=0 Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.100828 4884 
generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b" exitCode=0 Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.100837 4884 generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a" exitCode=0 Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.100846 4884 generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383" exitCode=143 Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.100853 4884 generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4" exitCode=143 Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.100894 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5"} Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.100919 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b"} Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.100930 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a"} Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.100939 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383"} Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.100947 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4"} Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.100961 4884 scope.go:117] "RemoveContainer" containerID="1f49cfed7fd755c619feda112b18be52e20ca0f0e83c0f45f762cffa17d17209" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.103084 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rcj68_0269081f-f135-4e66-91fd-a16277a00355/kube-multus/2.log" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.103406 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rcj68_0269081f-f135-4e66-91fd-a16277a00355/kube-multus/1.log" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.103450 4884 generic.go:334] "Generic (PLEG): container finished" podID="0269081f-f135-4e66-91fd-a16277a00355" containerID="70a038a9dee1c81b9417df5e512466f52dbcbee69d176ecad5c1a2f358d0fb4d" exitCode=2 Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.103477 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-multus/multus-rcj68" event={"ID":"0269081f-f135-4e66-91fd-a16277a00355","Type":"ContainerDied","Data":"70a038a9dee1c81b9417df5e512466f52dbcbee69d176ecad5c1a2f358d0fb4d"} Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.104399 4884 scope.go:117] "RemoveContainer" containerID="70a038a9dee1c81b9417df5e512466f52dbcbee69d176ecad5c1a2f358d0fb4d" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.185860 4884 scope.go:117] "RemoveContainer" containerID="90ee592c4791c1b29c7c97cfd3a53bd14803fc72b208e3993cc2fa8894f230fa" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.269797 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovn-acl-logging/0.log" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.270170 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovn-controller/0.log" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.270496 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.374688 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-zz62l"] Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375223 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d1f1281-4b1e-4b2f-988d-c0a3a4045592" containerName="pull" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375241 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d1f1281-4b1e-4b2f-988d-c0a3a4045592" containerName="pull" Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375266 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovn-acl-logging" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375275 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovn-acl-logging" Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375286 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375294 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller" Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375304 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="kube-rbac-proxy-node" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375311 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="kube-rbac-proxy-node" Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375323 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375332 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller" Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375342 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovn-controller" 
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375349 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovn-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375359 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="kube-rbac-proxy-ovn-metrics"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375366 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="kube-rbac-proxy-ovn-metrics"
Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375375 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="kubecfg-setup"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375382 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="kubecfg-setup"
Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375390 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d1f1281-4b1e-4b2f-988d-c0a3a4045592" containerName="util"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375397 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d1f1281-4b1e-4b2f-988d-c0a3a4045592" containerName="util"
Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375407 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375414 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375424 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="sbdb"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375450 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="sbdb"
Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375460 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375467 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375475 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375483 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375494 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="northd"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375502 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="northd"
Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375519 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d1f1281-4b1e-4b2f-988d-c0a3a4045592" containerName="extract"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375526 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d1f1281-4b1e-4b2f-988d-c0a3a4045592" containerName="extract"
Dec 10 00:44:13 crc kubenswrapper[4884]: E1210 00:44:13.375541 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="nbdb"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375549 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="nbdb"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375669 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovn-acl-logging"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375683 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="sbdb"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375693 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375704 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d1f1281-4b1e-4b2f-988d-c0a3a4045592" containerName="extract"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375718 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="kube-rbac-proxy-node"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375725 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375735 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="kube-rbac-proxy-ovn-metrics"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375745 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375754 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375762 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="nbdb"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375771 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovn-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375778 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="northd"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.375992 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" containerName="ovnkube-controller"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.379650 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.384813 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-ovn\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.384906 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.384975 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-systemd\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.385863 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-var-lib-openvswitch\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.385903 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-openvswitch\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.385929 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-systemd-units\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.385958 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7022e894-7a34-4a84-8b18-e4440e11e659-ovn-node-metrics-cert\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.385979 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-cni-bin\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.385983 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386011 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-ovnkube-config\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386011 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386041 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-etc-openvswitch\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386029 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386046 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386082 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-log-socket\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386105 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386114 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-env-overrides\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386131 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-log-socket" (OuterVolumeSpecName: "log-socket") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386146 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-var-lib-cni-networks-ovn-kubernetes\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386170 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-run-ovn-kubernetes\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386219 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386303 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386347 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-ovnkube-script-lib\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386376 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-run-netns\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386585 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386630 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386791 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386834 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386891 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-kubelet\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386933 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-slash\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386958 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-node-log\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.386979 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-cni-netd\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387043 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9hvx\" (UniqueName: \"kubernetes.io/projected/7022e894-7a34-4a84-8b18-e4440e11e659-kube-api-access-c9hvx\") pod \"7022e894-7a34-4a84-8b18-e4440e11e659\" (UID: \"7022e894-7a34-4a84-8b18-e4440e11e659\") "
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387072 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-slash" (OuterVolumeSpecName: "host-slash") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387099 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-node-log" (OuterVolumeSpecName: "node-log") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387100 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387116 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387249 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-cni-netd\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387281 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-log-socket\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387300 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4267m\" (UniqueName: \"kubernetes.io/projected/3117fc31-26b3-498f-8121-d5e7297b9986-kube-api-access-4267m\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387398 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-node-log\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387487 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-kubelet\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387508 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-var-lib-openvswitch\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387532 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-etc-openvswitch\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387546 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3117fc31-26b3-498f-8121-d5e7297b9986-env-overrides\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387565 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-cni-bin\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387585 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-run-openvswitch\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387603 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387631 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-run-netns\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387648 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-systemd-units\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387666 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3117fc31-26b3-498f-8121-d5e7297b9986-ovnkube-script-lib\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387686 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3117fc31-26b3-498f-8121-d5e7297b9986-ovn-node-metrics-cert\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387704 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3117fc31-26b3-498f-8121-d5e7297b9986-ovnkube-config\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387735 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-run-ovn-kubernetes\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387793 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-run-systemd\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387878 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-run-ovn\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387896 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-slash\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387977 4884 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-systemd-units\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387988 4884 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-cni-bin\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.387996 4884 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-ovnkube-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388006 4884 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388015 4884 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-log-socket\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388023 4884 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-env-overrides\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388032 4884 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388042 4884 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388051 4884 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7022e894-7a34-4a84-8b18-e4440e11e659-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388059 4884 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-run-netns\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388068 4884 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-kubelet\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388075 4884 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-slash\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388083 4884 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-node-log\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388092 4884 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-host-cni-netd\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388101 4884 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-ovn\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388111 4884 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-var-lib-openvswitch\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.388119 4884 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-openvswitch\") on node \"crc\" DevicePath \"\""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.402788 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7022e894-7a34-4a84-8b18-e4440e11e659-kube-api-access-c9hvx" (OuterVolumeSpecName: "kube-api-access-c9hvx") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "kube-api-access-c9hvx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.403013 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7022e894-7a34-4a84-8b18-e4440e11e659-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.405858 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "7022e894-7a34-4a84-8b18-e4440e11e659" (UID: "7022e894-7a34-4a84-8b18-e4440e11e659"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489390 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4267m\" (UniqueName: \"kubernetes.io/projected/3117fc31-26b3-498f-8121-d5e7297b9986-kube-api-access-4267m\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489451 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-node-log\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489470 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-kubelet\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489486 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-var-lib-openvswitch\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489517 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-etc-openvswitch\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489535 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3117fc31-26b3-498f-8121-d5e7297b9986-env-overrides\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489554 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-cni-bin\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489581 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-run-openvswitch\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489581 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-kubelet\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489601 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489615 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-cni-bin\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489626 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-etc-openvswitch\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489659 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-run-netns\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489671 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-var-lib-openvswitch\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489581 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-node-log\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489624 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-run-netns\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489701 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489652 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-run-openvswitch\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l"
Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489725 4884 reconciler_common.go:218]
"operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-systemd-units\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489742 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-systemd-units\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489765 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/3117fc31-26b3-498f-8121-d5e7297b9986-ovnkube-script-lib\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489812 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3117fc31-26b3-498f-8121-d5e7297b9986-ovn-node-metrics-cert\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489854 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3117fc31-26b3-498f-8121-d5e7297b9986-ovnkube-config\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.489878 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-run-ovn-kubernetes\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490020 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-run-ovn-kubernetes\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490103 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-run-systemd\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490134 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/3117fc31-26b3-498f-8121-d5e7297b9986-env-overrides\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490366 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: 
\"kubernetes.io/configmap/3117fc31-26b3-498f-8121-d5e7297b9986-ovnkube-script-lib\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490599 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/3117fc31-26b3-498f-8121-d5e7297b9986-ovnkube-config\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490652 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-run-systemd\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490680 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-run-ovn\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490725 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-slash\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490748 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-cni-netd\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490794 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-slash\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490827 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-log-socket\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490833 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-run-ovn\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490891 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-host-cni-netd\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490923 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7022e894-7a34-4a84-8b18-e4440e11e659-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490945 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/3117fc31-26b3-498f-8121-d5e7297b9986-log-socket\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490952 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9hvx\" (UniqueName: \"kubernetes.io/projected/7022e894-7a34-4a84-8b18-e4440e11e659-kube-api-access-c9hvx\") on node \"crc\" DevicePath \"\"" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.490970 4884 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7022e894-7a34-4a84-8b18-e4440e11e659-run-systemd\") on node \"crc\" DevicePath \"\"" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.494771 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/3117fc31-26b3-498f-8121-d5e7297b9986-ovn-node-metrics-cert\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.512164 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4267m\" (UniqueName: \"kubernetes.io/projected/3117fc31-26b3-498f-8121-d5e7297b9986-kube-api-access-4267m\") pod \"ovnkube-node-zz62l\" (UID: \"3117fc31-26b3-498f-8121-d5e7297b9986\") " pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.692094 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:13 crc kubenswrapper[4884]: W1210 00:44:13.707042 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3117fc31_26b3_498f_8121_d5e7297b9986.slice/crio-ca486c24a20fd53a23afcdbb0c3ff5044ec4126f280783a92a72100b9d0925d9 WatchSource:0}: Error finding container ca486c24a20fd53a23afcdbb0c3ff5044ec4126f280783a92a72100b9d0925d9: Status 404 returned error can't find the container with id ca486c24a20fd53a23afcdbb0c3ff5044ec4126f280783a92a72100b9d0925d9 Dec 10 00:44:13 crc kubenswrapper[4884]: I1210 00:44:13.846255 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xzh8t" podUID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" containerName="registry-server" probeResult="failure" output=< Dec 10 00:44:13 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 00:44:13 crc kubenswrapper[4884]: > Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.122592 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovn-acl-logging/0.log" Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.123362 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-g8w62_7022e894-7a34-4a84-8b18-e4440e11e659/ovn-controller/0.log" Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.123786 4884 generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232" exitCode=0 Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.123823 4884 generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540" exitCode=0 Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.123831 4884 generic.go:334] "Generic (PLEG): container finished" podID="7022e894-7a34-4a84-8b18-e4440e11e659" containerID="c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12" exitCode=0 Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.123890 4884 util.go:48] "No ready sandbox for pod can be found. 
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.123890 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.123899 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232"}
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.123998 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540"}
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.124023 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12"}
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.124038 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-g8w62" event={"ID":"7022e894-7a34-4a84-8b18-e4440e11e659","Type":"ContainerDied","Data":"19146dd173a1f6b857d3a1ba60aaeb8b0f2aee62980cfdd2b64fa271bfec029d"}
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.124051 4884 scope.go:117] "RemoveContainer" containerID="25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.125957 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rcj68_0269081f-f135-4e66-91fd-a16277a00355/kube-multus/2.log"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.126044 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rcj68" event={"ID":"0269081f-f135-4e66-91fd-a16277a00355","Type":"ContainerStarted","Data":"b76671450b2a4b0bbee3265cc3c5bc537636a321dcdff1318f87a7343b620566"}
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.127507 4884 generic.go:334] "Generic (PLEG): container finished" podID="3117fc31-26b3-498f-8121-d5e7297b9986" containerID="aabbf6f9d68aa1d302481b101485e381fd658bc25ac5c7aae2781ecf16984894" exitCode=0
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.127553 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" event={"ID":"3117fc31-26b3-498f-8121-d5e7297b9986","Type":"ContainerDied","Data":"aabbf6f9d68aa1d302481b101485e381fd658bc25ac5c7aae2781ecf16984894"}
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.127592 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" event={"ID":"3117fc31-26b3-498f-8121-d5e7297b9986","Type":"ContainerStarted","Data":"ca486c24a20fd53a23afcdbb0c3ff5044ec4126f280783a92a72100b9d0925d9"}
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.178779 4884 scope.go:117] "RemoveContainer" containerID="efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.200006 4884 scope.go:117] "RemoveContainer" containerID="4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.238553 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-g8w62"]
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.242271 4884 scope.go:117] "RemoveContainer" containerID="c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.244617 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-g8w62"]
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.264572 4884 scope.go:117] "RemoveContainer" containerID="6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.285592 4884 scope.go:117] "RemoveContainer" containerID="b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.304619 4884 scope.go:117] "RemoveContainer" containerID="851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.319588 4884 scope.go:117] "RemoveContainer" containerID="0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.335704 4884 scope.go:117] "RemoveContainer" containerID="5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.357165 4884 scope.go:117] "RemoveContainer" containerID="25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5"
Dec 10 00:44:14 crc kubenswrapper[4884]: E1210 00:44:14.357654 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5\": container with ID starting with 25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5 not found: ID does not exist" containerID="25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.357697 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5"} err="failed to get container status \"25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5\": rpc error: code = NotFound desc = could not find container \"25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5\": container with ID starting with 25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.357731 4884 scope.go:117] "RemoveContainer" containerID="efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232"
Dec 10 00:44:14 crc kubenswrapper[4884]: E1210 00:44:14.358181 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\": container with ID starting with efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232 not found: ID does not exist" containerID="efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.358225 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232"} err="failed to get container status \"efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\": rpc error: code = NotFound desc = could not find container \"efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\": container with ID starting with efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.358253 4884 scope.go:117] "RemoveContainer" containerID="4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540"
Dec 10 00:44:14 crc kubenswrapper[4884]: E1210 00:44:14.358547 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\": container with ID starting with 4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540 not found: ID does not exist" containerID="4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.358569 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540"} err="failed to get container status \"4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\": rpc error: code = NotFound desc = could not find container \"4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\": container with ID starting with 4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.358585 4884 scope.go:117] "RemoveContainer" containerID="c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12"
Dec 10 00:44:14 crc kubenswrapper[4884]: E1210 00:44:14.361635 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\": container with ID starting with c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12 not found: ID does not exist" containerID="c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.361674 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12"} err="failed to get container status \"c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\": rpc error: code = NotFound desc = could not find container \"c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\": container with ID starting with c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.361698 4884 scope.go:117] "RemoveContainer" containerID="6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b"
Dec 10 00:44:14 crc kubenswrapper[4884]: E1210 00:44:14.362983 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\": container with ID starting with 6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b not found: ID does not exist" containerID="6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.363026 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b"} err="failed to get container status \"6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\": rpc error: code = NotFound desc = could not find container \"6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\": container with ID starting with 6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.363050 4884 scope.go:117] "RemoveContainer" containerID="b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a"
Dec 10 00:44:14 crc kubenswrapper[4884]: E1210 00:44:14.363455 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\": container with ID starting with b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a not found: ID does not exist" containerID="b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.363565 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a"} err="failed to get container status \"b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\": rpc error: code = NotFound desc = could not find container \"b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\": container with ID starting with b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.363643 4884 scope.go:117] "RemoveContainer" containerID="851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383"
Dec 10 00:44:14 crc kubenswrapper[4884]: E1210 00:44:14.364071 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\": container with ID starting with 851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383 not found: ID does not exist" containerID="851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.364095 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383"} err="failed to get container status \"851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\": rpc error: code = NotFound desc = could not find container \"851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\": container with ID starting with 851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.364110 4884 scope.go:117] "RemoveContainer" containerID="0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4"
Dec 10 00:44:14 crc kubenswrapper[4884]: E1210 00:44:14.364401 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\": container with ID starting with 0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4 not found: ID does not exist" containerID="0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.364447 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4"} err="failed to get container status \"0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\": rpc error: code = NotFound desc = could not find container \"0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\": container with ID starting with 0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.364470 4884 scope.go:117] "RemoveContainer" containerID="5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa"
Dec 10 00:44:14 crc kubenswrapper[4884]: E1210 00:44:14.364782 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\": container with ID starting with 5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa not found: ID does not exist" containerID="5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.364811 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa"} err="failed to get container status \"5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\": rpc error: code = NotFound desc = could not find container \"5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\": container with ID starting with 5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.364829 4884 scope.go:117] "RemoveContainer" containerID="25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.365124 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5"} err="failed to get container status \"25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5\": rpc error: code = NotFound desc = could not find container \"25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5\": container with ID starting with 25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.365211 4884 scope.go:117] "RemoveContainer" containerID="efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.365563 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232"} err="failed to get container status \"efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\": rpc error: code = NotFound desc = could not find container \"efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\": container with ID starting with efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.365587 4884 scope.go:117] "RemoveContainer" containerID="4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.365894 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540"} err="failed to get container status \"4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\": rpc error: code = NotFound desc = could not find container \"4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\": container with ID starting with 4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.365985 4884 scope.go:117] "RemoveContainer" containerID="c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.366249 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12"} err="failed to get container status \"c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\": rpc error: code = NotFound desc = could not find container \"c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\": container with ID starting with c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.366270 4884 scope.go:117] "RemoveContainer" containerID="6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.366514 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b"} err="failed to get container status \"6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\": rpc error: code = NotFound desc = could not find container \"6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\": container with ID starting with 6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.366541 4884 scope.go:117] "RemoveContainer" containerID="b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.366732 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a"} err="failed to get container status \"b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\": rpc error: code = NotFound desc = could not find container \"b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\": container with ID starting with b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.366752 4884 scope.go:117] "RemoveContainer" containerID="851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.366943 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383"} err="failed to get container status \"851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\": rpc error: code = NotFound desc = could not find container \"851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\": container with ID starting with 851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.366971 4884 scope.go:117] "RemoveContainer" containerID="0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.367167 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4"} err="failed to get container status \"0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\": rpc error: code = NotFound desc = could not find container \"0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\": container with ID starting with 0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.367257 4884 scope.go:117] "RemoveContainer" containerID="5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.367521 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa"} err="failed to get container status \"5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\": rpc error: code = NotFound desc = could not find container \"5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\": container with ID starting with 5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.367545 4884 scope.go:117] "RemoveContainer" containerID="25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.367773 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5"} err="failed to get container status \"25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5\": rpc error: code = NotFound desc = could not find container \"25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5\": container with ID starting with 25e48e6acc88024841f094ca97d4be1e6109f65078a78b209830ce6bdb5917c5 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.367811 4884 scope.go:117] "RemoveContainer" containerID="efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.368014 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232"} err="failed to get container status \"efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\": rpc error: code = NotFound desc = could not find container \"efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232\": container with ID starting with efef16d2889d60c31e2da4cbc609160fa4fc04d8bceff7bc0213a6c415f41232 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.368038 4884 scope.go:117] "RemoveContainer" containerID="4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.368222 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540"} err="failed to get container status \"4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\": rpc error: code = NotFound desc = could not find container \"4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540\": container with ID starting with 4663dfdb570e5bd112d8ae01ac571e43ca562e56e61a36986f88156aff075540 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.368241 4884 scope.go:117] "RemoveContainer" containerID="c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.368491 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12"} err="failed to get container status \"c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\": rpc error: code = NotFound desc = could not find container \"c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12\": container with ID starting with c51809eb21d1b55c01a01b27db6ae5bfe538ba8e883fed93898ad30825b5df12 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.368523 4884 scope.go:117] "RemoveContainer" containerID="6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.368732 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b"} err="failed to get container status \"6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\": rpc error: code = NotFound desc = could not find container \"6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b\": container with ID starting with 6fc0312b4011a2a3387d20b3cc95b387ea15063d9e9353b15c2f8cc81fe3ee4b not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.368759 4884 scope.go:117] "RemoveContainer" containerID="b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.369259 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a"} err="failed to get container status \"b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\": rpc error: code = NotFound desc = could not find container \"b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a\": container with ID starting with b7f63fd762e905d978d8cc3556a69a029007d58391a1fab0d8b27bedfca9d44a not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.369289 4884 scope.go:117] "RemoveContainer" containerID="851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.369758 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383"} err="failed to get container status \"851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\": rpc error: code = NotFound desc = could not find container \"851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383\": container with ID starting with 851d890fc5e06ebde302f5529e3d867d4b5596f252cb9fc1964d501df7003383 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.369788 4884 scope.go:117] "RemoveContainer" containerID="0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.370019 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4"} err="failed to get container status \"0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\": rpc error: code = NotFound desc = could not find container \"0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4\": container with ID starting with 0ccd8761c8825f001f853ebebcdd47684d8092cf9f5728bb972d82573eb15ee4 not found: ID does not exist"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.370047 4884 scope.go:117] "RemoveContainer" containerID="5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa"
Dec 10 00:44:14 crc kubenswrapper[4884]: I1210 00:44:14.370290 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa"} err="failed to get container status \"5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\": rpc error: code = NotFound desc = could not find container \"5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa\": container with ID starting with 5097f3b24a4a9c39211b9abd89e61256f6d45318c8c73dfab11192346d3a06aa not found: ID does not exist"
Dec 10 00:44:15 crc kubenswrapper[4884]: I1210 00:44:15.135912 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" event={"ID":"3117fc31-26b3-498f-8121-d5e7297b9986","Type":"ContainerStarted","Data":"69778fb1dc1e4259eb8b992a63e28d710b92811dfa0a00a20125c3afd78e8f34"}
Dec 10 00:44:15 crc kubenswrapper[4884]: I1210 00:44:15.136186 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" event={"ID":"3117fc31-26b3-498f-8121-d5e7297b9986","Type":"ContainerStarted","Data":"de451268e801c9aa7b521f9b0ee8f60636f70fcf074154876b092a5699e2a1dd"}
Dec 10 00:44:15 crc kubenswrapper[4884]: I1210 00:44:15.136198 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" event={"ID":"3117fc31-26b3-498f-8121-d5e7297b9986","Type":"ContainerStarted","Data":"dfacd335deb18aa44377594deb1b8aef3c5895c604b08f6cb5117b0ff431d0a3"}
Dec 10 00:44:15 crc kubenswrapper[4884]: I1210 00:44:15.136207 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" event={"ID":"3117fc31-26b3-498f-8121-d5e7297b9986","Type":"ContainerStarted","Data":"50ea87502d31d3fbde1b7aa0bbcba1b3301322b2f55439487c36f51ceb53d826"}
Dec 10 00:44:15 crc kubenswrapper[4884]: I1210 00:44:15.136216 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" event={"ID":"3117fc31-26b3-498f-8121-d5e7297b9986","Type":"ContainerStarted","Data":"0af29e653e41d98d07538264d0eea024e45716a403b471e5827d1ca6f09938d5"}
Dec 10 00:44:15 crc kubenswrapper[4884]: I1210 00:44:15.136245 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" event={"ID":"3117fc31-26b3-498f-8121-d5e7297b9986","Type":"ContainerStarted","Data":"ced61b3ce5b4758ba40b0e13e4916decbf015d4788e9014e8a094639ce94986c"}
Dec 10 00:44:15 crc kubenswrapper[4884]: I1210 00:44:15.293618 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7022e894-7a34-4a84-8b18-e4440e11e659" path="/var/lib/kubelet/pods/7022e894-7a34-4a84-8b18-e4440e11e659/volumes"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.690194 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm"]
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.691586 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.695400 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-wj6nf"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.695687 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.700654 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.750933 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vg85\" (UniqueName: \"kubernetes.io/projected/ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8-kube-api-access-7vg85\") pod \"obo-prometheus-operator-668cf9dfbb-jvfjm\" (UID: \"ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.796221 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth"]
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.797038 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.798687 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-zng8t"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.798723 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.802294 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95"]
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.803202 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.851997 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vg85\" (UniqueName: \"kubernetes.io/projected/ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8-kube-api-access-7vg85\") pod \"obo-prometheus-operator-668cf9dfbb-jvfjm\" (UID: \"ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.852074 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/88c45bd9-28e4-457c-aa6e-571ef793eda2-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth\" (UID: \"88c45bd9-28e4-457c-aa6e-571ef793eda2\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.852132 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a9a5957-1f59-42ac-bc0d-3f3b494c4603-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95\" (UID: \"0a9a5957-1f59-42ac-bc0d-3f3b494c4603\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.852158 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a9a5957-1f59-42ac-bc0d-3f3b494c4603-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95\" (UID: \"0a9a5957-1f59-42ac-bc0d-3f3b494c4603\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.852182 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/88c45bd9-28e4-457c-aa6e-571ef793eda2-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth\" (UID: \"88c45bd9-28e4-457c-aa6e-571ef793eda2\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.874721 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vg85\" (UniqueName: \"kubernetes.io/projected/ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8-kube-api-access-7vg85\") pod \"obo-prometheus-operator-668cf9dfbb-jvfjm\" (UID: \"ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.918195 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-4pc64"]
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.918893 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.920395 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-f5jbh"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.921137 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.953662 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7fp6\" (UniqueName: \"kubernetes.io/projected/965132f2-3d6b-4773-ac9a-9b1e964fb251-kube-api-access-h7fp6\") pod \"observability-operator-d8bb48f5d-4pc64\" (UID: \"965132f2-3d6b-4773-ac9a-9b1e964fb251\") " pod="openshift-operators/observability-operator-d8bb48f5d-4pc64"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.953718 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/88c45bd9-28e4-457c-aa6e-571ef793eda2-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth\" (UID: \"88c45bd9-28e4-457c-aa6e-571ef793eda2\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.953775 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/965132f2-3d6b-4773-ac9a-9b1e964fb251-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-4pc64\" (UID: \"965132f2-3d6b-4773-ac9a-9b1e964fb251\") " pod="openshift-operators/observability-operator-d8bb48f5d-4pc64"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.953813 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a9a5957-1f59-42ac-bc0d-3f3b494c4603-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95\" (UID: \"0a9a5957-1f59-42ac-bc0d-3f3b494c4603\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.953834 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a9a5957-1f59-42ac-bc0d-3f3b494c4603-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95\" (UID: \"0a9a5957-1f59-42ac-bc0d-3f3b494c4603\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.953852 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/88c45bd9-28e4-457c-aa6e-571ef793eda2-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth\" (UID: \"88c45bd9-28e4-457c-aa6e-571ef793eda2\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.957567 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/88c45bd9-28e4-457c-aa6e-571ef793eda2-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth\" (UID: \"88c45bd9-28e4-457c-aa6e-571ef793eda2\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.957893 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0a9a5957-1f59-42ac-bc0d-3f3b494c4603-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95\" (UID: \"0a9a5957-1f59-42ac-bc0d-3f3b494c4603\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.960828 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/88c45bd9-28e4-457c-aa6e-571ef793eda2-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth\" (UID: \"88c45bd9-28e4-457c-aa6e-571ef793eda2\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth"
Dec 10 00:44:18 crc kubenswrapper[4884]: I1210 00:44:18.975374 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0a9a5957-1f59-42ac-bc0d-3f3b494c4603-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95\" (UID: \"0a9a5957-1f59-42ac-bc0d-3f3b494c4603\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95"
Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.015011 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-qtpmp"]
Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.015144 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm"
Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.015674 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qtpmp"
Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.017666 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-j2dgs"
Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.043316 4884 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jvfjm_openshift-operators_ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8_0(686692ef0097cca0d347d8c1ea9a389cdf307af560d8906f601fcb1f8f9e167b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.043398 4884 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jvfjm_openshift-operators_ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8_0(686692ef0097cca0d347d8c1ea9a389cdf307af560d8906f601fcb1f8f9e167b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm"
Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.043445 4884 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jvfjm_openshift-operators_ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8_0(686692ef0097cca0d347d8c1ea9a389cdf307af560d8906f601fcb1f8f9e167b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm"
Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.043501 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-jvfjm_openshift-operators(ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-jvfjm_openshift-operators(ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jvfjm_openshift-operators_ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8_0(686692ef0097cca0d347d8c1ea9a389cdf307af560d8906f601fcb1f8f9e167b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm" podUID="ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8"
Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.054710 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7fp6\" (UniqueName: \"kubernetes.io/projected/965132f2-3d6b-4773-ac9a-9b1e964fb251-kube-api-access-h7fp6\") pod \"observability-operator-d8bb48f5d-4pc64\" (UID: \"965132f2-3d6b-4773-ac9a-9b1e964fb251\") " pod="openshift-operators/observability-operator-d8bb48f5d-4pc64"
Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.054780 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/965132f2-3d6b-4773-ac9a-9b1e964fb251-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-4pc64\" (UID: \"965132f2-3d6b-4773-ac9a-9b1e964fb251\") " pod="openshift-operators/observability-operator-d8bb48f5d-4pc64"
Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.054808 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9qg6\" (UniqueName: \"kubernetes.io/projected/5d755f81-3b23-4251-a7c1-6c2ab0a4695d-kube-api-access-j9qg6\") pod \"perses-operator-5446b9c989-qtpmp\" (UID: \"5d755f81-3b23-4251-a7c1-6c2ab0a4695d\") " pod="openshift-operators/perses-operator-5446b9c989-qtpmp"
Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.054847 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/5d755f81-3b23-4251-a7c1-6c2ab0a4695d-openshift-service-ca\") pod \"perses-operator-5446b9c989-qtpmp\" (UID: \"5d755f81-3b23-4251-a7c1-6c2ab0a4695d\") " pod="openshift-operators/perses-operator-5446b9c989-qtpmp"
Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.060382 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/965132f2-3d6b-4773-ac9a-9b1e964fb251-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-4pc64\"
(UID: \"965132f2-3d6b-4773-ac9a-9b1e964fb251\") " pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.072600 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7fp6\" (UniqueName: \"kubernetes.io/projected/965132f2-3d6b-4773-ac9a-9b1e964fb251-kube-api-access-h7fp6\") pod \"observability-operator-d8bb48f5d-4pc64\" (UID: \"965132f2-3d6b-4773-ac9a-9b1e964fb251\") " pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.113037 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.121024 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.141684 4884 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_openshift-operators_88c45bd9-28e4-457c-aa6e-571ef793eda2_0(62c5e1ec486526724dcd5699a9c8c0a1861ccd9e60e3c912fd8bd9d65c61c1d8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.141775 4884 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_openshift-operators_88c45bd9-28e4-457c-aa6e-571ef793eda2_0(62c5e1ec486526724dcd5699a9c8c0a1861ccd9e60e3c912fd8bd9d65c61c1d8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.141812 4884 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_openshift-operators_88c45bd9-28e4-457c-aa6e-571ef793eda2_0(62c5e1ec486526724dcd5699a9c8c0a1861ccd9e60e3c912fd8bd9d65c61c1d8): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.141892 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_openshift-operators(88c45bd9-28e4-457c-aa6e-571ef793eda2)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_openshift-operators(88c45bd9-28e4-457c-aa6e-571ef793eda2)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_openshift-operators_88c45bd9-28e4-457c-aa6e-571ef793eda2_0(62c5e1ec486526724dcd5699a9c8c0a1861ccd9e60e3c912fd8bd9d65c61c1d8): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" podUID="88c45bd9-28e4-457c-aa6e-571ef793eda2" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.150793 4884 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_openshift-operators_0a9a5957-1f59-42ac-bc0d-3f3b494c4603_0(b814ce590e97ed4d0d15ada3d0170ed7c39087e033b0d6393c843595ee8fa54f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.150849 4884 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_openshift-operators_0a9a5957-1f59-42ac-bc0d-3f3b494c4603_0(b814ce590e97ed4d0d15ada3d0170ed7c39087e033b0d6393c843595ee8fa54f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.150872 4884 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_openshift-operators_0a9a5957-1f59-42ac-bc0d-3f3b494c4603_0(b814ce590e97ed4d0d15ada3d0170ed7c39087e033b0d6393c843595ee8fa54f): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.150912 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_openshift-operators(0a9a5957-1f59-42ac-bc0d-3f3b494c4603)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_openshift-operators(0a9a5957-1f59-42ac-bc0d-3f3b494c4603)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_openshift-operators_0a9a5957-1f59-42ac-bc0d-3f3b494c4603_0(b814ce590e97ed4d0d15ada3d0170ed7c39087e033b0d6393c843595ee8fa54f): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" podUID="0a9a5957-1f59-42ac-bc0d-3f3b494c4603" Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.155781 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/5d755f81-3b23-4251-a7c1-6c2ab0a4695d-openshift-service-ca\") pod \"perses-operator-5446b9c989-qtpmp\" (UID: \"5d755f81-3b23-4251-a7c1-6c2ab0a4695d\") " pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.155893 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9qg6\" (UniqueName: \"kubernetes.io/projected/5d755f81-3b23-4251-a7c1-6c2ab0a4695d-kube-api-access-j9qg6\") pod \"perses-operator-5446b9c989-qtpmp\" (UID: \"5d755f81-3b23-4251-a7c1-6c2ab0a4695d\") " pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.157126 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/5d755f81-3b23-4251-a7c1-6c2ab0a4695d-openshift-service-ca\") pod \"perses-operator-5446b9c989-qtpmp\" (UID: \"5d755f81-3b23-4251-a7c1-6c2ab0a4695d\") " pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.158372 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" event={"ID":"3117fc31-26b3-498f-8121-d5e7297b9986","Type":"ContainerStarted","Data":"c53546532773fcdcfb75a5e2272faec8ce58a03dca520a9d4b08fc3da701d763"} Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.185980 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9qg6\" (UniqueName: \"kubernetes.io/projected/5d755f81-3b23-4251-a7c1-6c2ab0a4695d-kube-api-access-j9qg6\") pod \"perses-operator-5446b9c989-qtpmp\" (UID: \"5d755f81-3b23-4251-a7c1-6c2ab0a4695d\") " pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.234411 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.260895 4884 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-4pc64_openshift-operators_965132f2-3d6b-4773-ac9a-9b1e964fb251_0(a7e8859223ad29ce3cd78fefacb9172aa92ff0837ce6269a4c202795774e8cef): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.260974 4884 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-4pc64_openshift-operators_965132f2-3d6b-4773-ac9a-9b1e964fb251_0(a7e8859223ad29ce3cd78fefacb9172aa92ff0837ce6269a4c202795774e8cef): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.260999 4884 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-4pc64_openshift-operators_965132f2-3d6b-4773-ac9a-9b1e964fb251_0(a7e8859223ad29ce3cd78fefacb9172aa92ff0837ce6269a4c202795774e8cef): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.261058 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-4pc64_openshift-operators(965132f2-3d6b-4773-ac9a-9b1e964fb251)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-4pc64_openshift-operators(965132f2-3d6b-4773-ac9a-9b1e964fb251)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-4pc64_openshift-operators_965132f2-3d6b-4773-ac9a-9b1e964fb251_0(a7e8859223ad29ce3cd78fefacb9172aa92ff0837ce6269a4c202795774e8cef): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" podUID="965132f2-3d6b-4773-ac9a-9b1e964fb251" Dec 10 00:44:19 crc kubenswrapper[4884]: I1210 00:44:19.348542 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.370713 4884 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qtpmp_openshift-operators_5d755f81-3b23-4251-a7c1-6c2ab0a4695d_0(eb43e0683d1d85dcca2480d1d0776f1ea2aa98a3a6b5a8b3adb327fe79a33b20): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.370794 4884 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qtpmp_openshift-operators_5d755f81-3b23-4251-a7c1-6c2ab0a4695d_0(eb43e0683d1d85dcca2480d1d0776f1ea2aa98a3a6b5a8b3adb327fe79a33b20): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.370821 4884 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qtpmp_openshift-operators_5d755f81-3b23-4251-a7c1-6c2ab0a4695d_0(eb43e0683d1d85dcca2480d1d0776f1ea2aa98a3a6b5a8b3adb327fe79a33b20): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:19 crc kubenswrapper[4884]: E1210 00:44:19.370866 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-qtpmp_openshift-operators(5d755f81-3b23-4251-a7c1-6c2ab0a4695d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-qtpmp_openshift-operators(5d755f81-3b23-4251-a7c1-6c2ab0a4695d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qtpmp_openshift-operators_5d755f81-3b23-4251-a7c1-6c2ab0a4695d_0(eb43e0683d1d85dcca2480d1d0776f1ea2aa98a3a6b5a8b3adb327fe79a33b20): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" podUID="5d755f81-3b23-4251-a7c1-6c2ab0a4695d" Dec 10 00:44:22 crc kubenswrapper[4884]: I1210 00:44:22.812150 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:22 crc kubenswrapper[4884]: I1210 00:44:22.894891 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.117956 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95"] Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.118074 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.118526 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.143331 4884 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_openshift-operators_0a9a5957-1f59-42ac-bc0d-3f3b494c4603_0(0e5e9cbf52984984aa52a71ed88513ba2782bdf01e92d30e3e69c356a4083152): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.143399 4884 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_openshift-operators_0a9a5957-1f59-42ac-bc0d-3f3b494c4603_0(0e5e9cbf52984984aa52a71ed88513ba2782bdf01e92d30e3e69c356a4083152): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.143421 4884 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_openshift-operators_0a9a5957-1f59-42ac-bc0d-3f3b494c4603_0(0e5e9cbf52984984aa52a71ed88513ba2782bdf01e92d30e3e69c356a4083152): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.143489 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_openshift-operators(0a9a5957-1f59-42ac-bc0d-3f3b494c4603)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_openshift-operators(0a9a5957-1f59-42ac-bc0d-3f3b494c4603)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_openshift-operators_0a9a5957-1f59-42ac-bc0d-3f3b494c4603_0(0e5e9cbf52984984aa52a71ed88513ba2782bdf01e92d30e3e69c356a4083152): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" podUID="0a9a5957-1f59-42ac-bc0d-3f3b494c4603" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.148147 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm"] Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.148288 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.148693 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.155722 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-4pc64"] Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.155840 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.156223 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.178651 4884 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jvfjm_openshift-operators_ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8_0(52f4e1bd10e12762371053ba982f9d688640e1af5c3913c9fcd344f0d07411dc): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.178707 4884 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jvfjm_openshift-operators_ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8_0(52f4e1bd10e12762371053ba982f9d688640e1af5c3913c9fcd344f0d07411dc): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.178733 4884 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jvfjm_openshift-operators_ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8_0(52f4e1bd10e12762371053ba982f9d688640e1af5c3913c9fcd344f0d07411dc): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.178779 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-668cf9dfbb-jvfjm_openshift-operators(ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-668cf9dfbb-jvfjm_openshift-operators(ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-668cf9dfbb-jvfjm_openshift-operators_ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8_0(52f4e1bd10e12762371053ba982f9d688640e1af5c3913c9fcd344f0d07411dc): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm" podUID="ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.188605 4884 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-4pc64_openshift-operators_965132f2-3d6b-4773-ac9a-9b1e964fb251_0(3192d329b5a3deb6c299b8a1232cb9a73d8693e806ec872f1452a0e2974c89b3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.188651 4884 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-4pc64_openshift-operators_965132f2-3d6b-4773-ac9a-9b1e964fb251_0(3192d329b5a3deb6c299b8a1232cb9a73d8693e806ec872f1452a0e2974c89b3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.188672 4884 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-4pc64_openshift-operators_965132f2-3d6b-4773-ac9a-9b1e964fb251_0(3192d329b5a3deb6c299b8a1232cb9a73d8693e806ec872f1452a0e2974c89b3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.188718 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"observability-operator-d8bb48f5d-4pc64_openshift-operators(965132f2-3d6b-4773-ac9a-9b1e964fb251)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"observability-operator-d8bb48f5d-4pc64_openshift-operators(965132f2-3d6b-4773-ac9a-9b1e964fb251)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_observability-operator-d8bb48f5d-4pc64_openshift-operators_965132f2-3d6b-4773-ac9a-9b1e964fb251_0(3192d329b5a3deb6c299b8a1232cb9a73d8693e806ec872f1452a0e2974c89b3): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" podUID="965132f2-3d6b-4773-ac9a-9b1e964fb251" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.195788 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" event={"ID":"3117fc31-26b3-498f-8121-d5e7297b9986","Type":"ContainerStarted","Data":"ee100924ea46cd1381f7d2dad9423613fa9ecafeb575d89b804ec94da5d156b2"} Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.196016 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.196140 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.196159 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.197494 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth"] Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.197571 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.197939 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.236971 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-qtpmp"] Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.237115 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.237592 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.300317 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" podStartSLOduration=10.300296424 podStartE2EDuration="10.300296424s" podCreationTimestamp="2025-12-10 00:44:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:44:23.297450073 +0000 UTC m=+836.375407210" watchObservedRunningTime="2025-12-10 00:44:23.300296424 +0000 UTC m=+836.378253541" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.312655 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.326596 4884 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_openshift-operators_88c45bd9-28e4-457c-aa6e-571ef793eda2_0(90f02e9cda7d3cf0b969dda77839a565148f5937ba04850d69c650927cac0ed6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.326668 4884 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_openshift-operators_88c45bd9-28e4-457c-aa6e-571ef793eda2_0(90f02e9cda7d3cf0b969dda77839a565148f5937ba04850d69c650927cac0ed6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.326695 4884 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_openshift-operators_88c45bd9-28e4-457c-aa6e-571ef793eda2_0(90f02e9cda7d3cf0b969dda77839a565148f5937ba04850d69c650927cac0ed6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.326747 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_openshift-operators(88c45bd9-28e4-457c-aa6e-571ef793eda2)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_openshift-operators(88c45bd9-28e4-457c-aa6e-571ef793eda2)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_openshift-operators_88c45bd9-28e4-457c-aa6e-571ef793eda2_0(90f02e9cda7d3cf0b969dda77839a565148f5937ba04850d69c650927cac0ed6): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" podUID="88c45bd9-28e4-457c-aa6e-571ef793eda2" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.342589 4884 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qtpmp_openshift-operators_5d755f81-3b23-4251-a7c1-6c2ab0a4695d_0(b33de7589a656969bc81e934d866ab709742010cd1d9832af049c657b64a2782): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.342668 4884 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qtpmp_openshift-operators_5d755f81-3b23-4251-a7c1-6c2ab0a4695d_0(b33de7589a656969bc81e934d866ab709742010cd1d9832af049c657b64a2782): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.342692 4884 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qtpmp_openshift-operators_5d755f81-3b23-4251-a7c1-6c2ab0a4695d_0(b33de7589a656969bc81e934d866ab709742010cd1d9832af049c657b64a2782): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:23 crc kubenswrapper[4884]: E1210 00:44:23.342737 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"perses-operator-5446b9c989-qtpmp_openshift-operators(5d755f81-3b23-4251-a7c1-6c2ab0a4695d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"perses-operator-5446b9c989-qtpmp_openshift-operators(5d755f81-3b23-4251-a7c1-6c2ab0a4695d)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_perses-operator-5446b9c989-qtpmp_openshift-operators_5d755f81-3b23-4251-a7c1-6c2ab0a4695d_0(b33de7589a656969bc81e934d866ab709742010cd1d9832af049c657b64a2782): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" podUID="5d755f81-3b23-4251-a7c1-6c2ab0a4695d" Dec 10 00:44:23 crc kubenswrapper[4884]: I1210 00:44:23.359718 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:24 crc kubenswrapper[4884]: I1210 00:44:24.833859 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xzh8t"] Dec 10 00:44:24 crc kubenswrapper[4884]: I1210 00:44:24.834462 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xzh8t" podUID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" containerName="registry-server" containerID="cri-o://60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb" gracePeriod=2 Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.105256 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.209924 4884 generic.go:334] "Generic (PLEG): container finished" podID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" containerID="60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb" exitCode=0 Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.209991 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xzh8t" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.210023 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzh8t" event={"ID":"9cff36d7-7f16-4fed-b966-9b4eb28d296b","Type":"ContainerDied","Data":"60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb"} Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.210068 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzh8t" event={"ID":"9cff36d7-7f16-4fed-b966-9b4eb28d296b","Type":"ContainerDied","Data":"2cd0472aea12528dc59e696155e256b207638b3e47e27941652620965b91cd09"} Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.210085 4884 scope.go:117] "RemoveContainer" containerID="60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.224822 4884 scope.go:117] "RemoveContainer" containerID="e01e1c597de535c0d14dc0706eb1ddcec3c228b607f2eb59c6b6ed6d5ff1fbb8" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.239599 4884 scope.go:117] "RemoveContainer" containerID="75ca3bde973bf525051dcad83ff053a85742391bb9c4e860272d35125ea64aba" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.244426 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6wv6t\" (UniqueName: \"kubernetes.io/projected/9cff36d7-7f16-4fed-b966-9b4eb28d296b-kube-api-access-6wv6t\") pod \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\" (UID: \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\") " Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.244480 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cff36d7-7f16-4fed-b966-9b4eb28d296b-catalog-content\") pod \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\" (UID: \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\") " Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.244533 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cff36d7-7f16-4fed-b966-9b4eb28d296b-utilities\") pod \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\" (UID: \"9cff36d7-7f16-4fed-b966-9b4eb28d296b\") " Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.245288 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cff36d7-7f16-4fed-b966-9b4eb28d296b-utilities" (OuterVolumeSpecName: "utilities") pod "9cff36d7-7f16-4fed-b966-9b4eb28d296b" (UID: "9cff36d7-7f16-4fed-b966-9b4eb28d296b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.252719 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cff36d7-7f16-4fed-b966-9b4eb28d296b-kube-api-access-6wv6t" (OuterVolumeSpecName: "kube-api-access-6wv6t") pod "9cff36d7-7f16-4fed-b966-9b4eb28d296b" (UID: "9cff36d7-7f16-4fed-b966-9b4eb28d296b"). InnerVolumeSpecName "kube-api-access-6wv6t". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.275316 4884 scope.go:117] "RemoveContainer" containerID="60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb" Dec 10 00:44:25 crc kubenswrapper[4884]: E1210 00:44:25.275862 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb\": container with ID starting with 60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb not found: ID does not exist" containerID="60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.275893 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb"} err="failed to get container status \"60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb\": rpc error: code = NotFound desc = could not find container \"60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb\": container with ID starting with 60b65c41ee9c40cf72b44758ac7c74b31ff0270c43bc299dd64b17f1d37f45bb not found: ID does not exist" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.275912 4884 scope.go:117] "RemoveContainer" containerID="e01e1c597de535c0d14dc0706eb1ddcec3c228b607f2eb59c6b6ed6d5ff1fbb8" Dec 10 00:44:25 crc kubenswrapper[4884]: E1210 00:44:25.276319 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e01e1c597de535c0d14dc0706eb1ddcec3c228b607f2eb59c6b6ed6d5ff1fbb8\": container with ID starting with e01e1c597de535c0d14dc0706eb1ddcec3c228b607f2eb59c6b6ed6d5ff1fbb8 not found: ID does not exist" containerID="e01e1c597de535c0d14dc0706eb1ddcec3c228b607f2eb59c6b6ed6d5ff1fbb8" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.276371 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e01e1c597de535c0d14dc0706eb1ddcec3c228b607f2eb59c6b6ed6d5ff1fbb8"} err="failed to get container status \"e01e1c597de535c0d14dc0706eb1ddcec3c228b607f2eb59c6b6ed6d5ff1fbb8\": rpc error: code = NotFound desc = could not find container \"e01e1c597de535c0d14dc0706eb1ddcec3c228b607f2eb59c6b6ed6d5ff1fbb8\": container with ID starting with e01e1c597de535c0d14dc0706eb1ddcec3c228b607f2eb59c6b6ed6d5ff1fbb8 not found: ID does not exist" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.276407 4884 scope.go:117] "RemoveContainer" containerID="75ca3bde973bf525051dcad83ff053a85742391bb9c4e860272d35125ea64aba" Dec 10 00:44:25 crc kubenswrapper[4884]: E1210 00:44:25.276709 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75ca3bde973bf525051dcad83ff053a85742391bb9c4e860272d35125ea64aba\": container with ID starting with 75ca3bde973bf525051dcad83ff053a85742391bb9c4e860272d35125ea64aba not found: ID does not 
exist" containerID="75ca3bde973bf525051dcad83ff053a85742391bb9c4e860272d35125ea64aba" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.276741 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75ca3bde973bf525051dcad83ff053a85742391bb9c4e860272d35125ea64aba"} err="failed to get container status \"75ca3bde973bf525051dcad83ff053a85742391bb9c4e860272d35125ea64aba\": rpc error: code = NotFound desc = could not find container \"75ca3bde973bf525051dcad83ff053a85742391bb9c4e860272d35125ea64aba\": container with ID starting with 75ca3bde973bf525051dcad83ff053a85742391bb9c4e860272d35125ea64aba not found: ID does not exist" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.345769 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6wv6t\" (UniqueName: \"kubernetes.io/projected/9cff36d7-7f16-4fed-b966-9b4eb28d296b-kube-api-access-6wv6t\") on node \"crc\" DevicePath \"\"" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.346096 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9cff36d7-7f16-4fed-b966-9b4eb28d296b-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.364925 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9cff36d7-7f16-4fed-b966-9b4eb28d296b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9cff36d7-7f16-4fed-b966-9b4eb28d296b" (UID: "9cff36d7-7f16-4fed-b966-9b4eb28d296b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.447268 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9cff36d7-7f16-4fed-b966-9b4eb28d296b-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.534515 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xzh8t"] Dec 10 00:44:25 crc kubenswrapper[4884]: I1210 00:44:25.535737 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xzh8t"] Dec 10 00:44:27 crc kubenswrapper[4884]: I1210 00:44:27.300321 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" path="/var/lib/kubelet/pods/9cff36d7-7f16-4fed-b966-9b4eb28d296b/volumes" Dec 10 00:44:34 crc kubenswrapper[4884]: I1210 00:44:34.286308 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" Dec 10 00:44:34 crc kubenswrapper[4884]: I1210 00:44:34.286385 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:34 crc kubenswrapper[4884]: I1210 00:44:34.287237 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" Dec 10 00:44:34 crc kubenswrapper[4884]: I1210 00:44:34.287333 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:34 crc kubenswrapper[4884]: I1210 00:44:34.549774 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-qtpmp"] Dec 10 00:44:34 crc kubenswrapper[4884]: W1210 00:44:34.564768 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d755f81_3b23_4251_a7c1_6c2ab0a4695d.slice/crio-001e5b03aec6e343e9a93535844d1293a65d3b5fa1c3dabbf40a82688c708c1c WatchSource:0}: Error finding container 001e5b03aec6e343e9a93535844d1293a65d3b5fa1c3dabbf40a82688c708c1c: Status 404 returned error can't find the container with id 001e5b03aec6e343e9a93535844d1293a65d3b5fa1c3dabbf40a82688c708c1c Dec 10 00:44:34 crc kubenswrapper[4884]: I1210 00:44:34.605751 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95"] Dec 10 00:44:34 crc kubenswrapper[4884]: W1210 00:44:34.607716 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a9a5957_1f59_42ac_bc0d_3f3b494c4603.slice/crio-278b5a56fc5658f3c2964a72395ed66a689bba0cfe8f845bdb695fd8cc19d860 WatchSource:0}: Error finding container 278b5a56fc5658f3c2964a72395ed66a689bba0cfe8f845bdb695fd8cc19d860: Status 404 returned error can't find the container with id 278b5a56fc5658f3c2964a72395ed66a689bba0cfe8f845bdb695fd8cc19d860 Dec 10 00:44:35 crc kubenswrapper[4884]: I1210 00:44:35.260276 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" event={"ID":"0a9a5957-1f59-42ac-bc0d-3f3b494c4603","Type":"ContainerStarted","Data":"278b5a56fc5658f3c2964a72395ed66a689bba0cfe8f845bdb695fd8cc19d860"} Dec 10 00:44:35 crc kubenswrapper[4884]: I1210 00:44:35.261702 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" event={"ID":"5d755f81-3b23-4251-a7c1-6c2ab0a4695d","Type":"ContainerStarted","Data":"001e5b03aec6e343e9a93535844d1293a65d3b5fa1c3dabbf40a82688c708c1c"} Dec 10 00:44:35 crc kubenswrapper[4884]: I1210 00:44:35.286425 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm" Dec 10 00:44:35 crc kubenswrapper[4884]: I1210 00:44:35.286953 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm" Dec 10 00:44:35 crc kubenswrapper[4884]: I1210 00:44:35.536678 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm"] Dec 10 00:44:36 crc kubenswrapper[4884]: I1210 00:44:36.271320 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm" event={"ID":"ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8","Type":"ContainerStarted","Data":"fd9470ef2eb01a110538fdd81a10bb080f12e3efe97ac5e2c484001bef802424"} Dec 10 00:44:38 crc kubenswrapper[4884]: I1210 00:44:38.286878 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" Dec 10 00:44:38 crc kubenswrapper[4884]: I1210 00:44:38.287589 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" Dec 10 00:44:38 crc kubenswrapper[4884]: I1210 00:44:38.286894 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:38 crc kubenswrapper[4884]: I1210 00:44:38.288036 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:38 crc kubenswrapper[4884]: I1210 00:44:38.708177 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth"] Dec 10 00:44:38 crc kubenswrapper[4884]: W1210 00:44:38.727044 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod88c45bd9_28e4_457c_aa6e_571ef793eda2.slice/crio-a5941350569a30e9cf2c7a144b2759401c9c0a5824dae046941fc717e02a4ce8 WatchSource:0}: Error finding container a5941350569a30e9cf2c7a144b2759401c9c0a5824dae046941fc717e02a4ce8: Status 404 returned error can't find the container with id a5941350569a30e9cf2c7a144b2759401c9c0a5824dae046941fc717e02a4ce8 Dec 10 00:44:38 crc kubenswrapper[4884]: I1210 00:44:38.765354 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-4pc64"] Dec 10 00:44:38 crc kubenswrapper[4884]: W1210 00:44:38.771795 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod965132f2_3d6b_4773_ac9a_9b1e964fb251.slice/crio-66c49e480868f7b42c73efd573e7ab2332ff11ffbdb579a3450cdbaa04091292 WatchSource:0}: Error finding container 66c49e480868f7b42c73efd573e7ab2332ff11ffbdb579a3450cdbaa04091292: Status 404 returned error can't find the container with id 66c49e480868f7b42c73efd573e7ab2332ff11ffbdb579a3450cdbaa04091292 Dec 10 00:44:39 crc kubenswrapper[4884]: I1210 00:44:39.294977 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" event={"ID":"88c45bd9-28e4-457c-aa6e-571ef793eda2","Type":"ContainerStarted","Data":"a5941350569a30e9cf2c7a144b2759401c9c0a5824dae046941fc717e02a4ce8"} Dec 10 00:44:39 crc kubenswrapper[4884]: I1210 00:44:39.295413 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" event={"ID":"965132f2-3d6b-4773-ac9a-9b1e964fb251","Type":"ContainerStarted","Data":"66c49e480868f7b42c73efd573e7ab2332ff11ffbdb579a3450cdbaa04091292"} Dec 10 00:44:43 crc kubenswrapper[4884]: I1210 00:44:43.735170 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-zz62l" Dec 10 00:44:48 crc kubenswrapper[4884]: I1210 00:44:48.344769 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" event={"ID":"88c45bd9-28e4-457c-aa6e-571ef793eda2","Type":"ContainerStarted","Data":"9e330c3a1e6c7bbf58936984e3c2b92f0667d7a1ce00a6499eb4e12c46c7803e"} Dec 10 00:44:48 crc kubenswrapper[4884]: I1210 00:44:48.346141 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" event={"ID":"5d755f81-3b23-4251-a7c1-6c2ab0a4695d","Type":"ContainerStarted","Data":"ea6155ce144438392afb5da0a3d76d5a2b5c10c97340fcb18837e9389e0b4b13"} Dec 10 
00:44:48 crc kubenswrapper[4884]: I1210 00:44:48.346462 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:48 crc kubenswrapper[4884]: I1210 00:44:48.347622 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" event={"ID":"965132f2-3d6b-4773-ac9a-9b1e964fb251","Type":"ContainerStarted","Data":"2709bfc5231307e4ed5abaa1b399f5099a437f7edd2bf3d60484035214f9b399"} Dec 10 00:44:48 crc kubenswrapper[4884]: I1210 00:44:48.347834 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:48 crc kubenswrapper[4884]: I1210 00:44:48.348850 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" event={"ID":"0a9a5957-1f59-42ac-bc0d-3f3b494c4603","Type":"ContainerStarted","Data":"59841c7e19ed06c2fa5aa1a1efedc2b38a350e930bf8613ce184505667d41d6e"} Dec 10 00:44:48 crc kubenswrapper[4884]: I1210 00:44:48.349041 4884 patch_prober.go:28] interesting pod/observability-operator-d8bb48f5d-4pc64 container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.10:8081/healthz\": dial tcp 10.217.0.10:8081: connect: connection refused" start-of-body= Dec 10 00:44:48 crc kubenswrapper[4884]: I1210 00:44:48.349082 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" podUID="965132f2-3d6b-4773-ac9a-9b1e964fb251" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.10:8081/healthz\": dial tcp 10.217.0.10:8081: connect: connection refused" Dec 10 00:44:48 crc kubenswrapper[4884]: I1210 00:44:48.363251 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth" podStartSLOduration=21.027574443 podStartE2EDuration="30.363229736s" podCreationTimestamp="2025-12-10 00:44:18 +0000 UTC" firstStartedPulling="2025-12-10 00:44:38.73158967 +0000 UTC m=+851.809546787" lastFinishedPulling="2025-12-10 00:44:48.067244943 +0000 UTC m=+861.145202080" observedRunningTime="2025-12-10 00:44:48.359064971 +0000 UTC m=+861.437022098" watchObservedRunningTime="2025-12-10 00:44:48.363229736 +0000 UTC m=+861.441186853" Dec 10 00:44:48 crc kubenswrapper[4884]: I1210 00:44:48.379866 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95" podStartSLOduration=16.993942732 podStartE2EDuration="30.379842635s" podCreationTimestamp="2025-12-10 00:44:18 +0000 UTC" firstStartedPulling="2025-12-10 00:44:34.608951945 +0000 UTC m=+847.686909062" lastFinishedPulling="2025-12-10 00:44:47.994851838 +0000 UTC m=+861.072808965" observedRunningTime="2025-12-10 00:44:48.375618128 +0000 UTC m=+861.453575265" watchObservedRunningTime="2025-12-10 00:44:48.379842635 +0000 UTC m=+861.457799752" Dec 10 00:44:48 crc kubenswrapper[4884]: I1210 00:44:48.436618 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" podStartSLOduration=17.009018302 podStartE2EDuration="30.436597596s" podCreationTimestamp="2025-12-10 00:44:18 +0000 UTC" firstStartedPulling="2025-12-10 00:44:34.567277304 +0000 UTC m=+847.645234411" lastFinishedPulling="2025-12-10 
00:44:47.994856548 +0000 UTC m=+861.072813705" observedRunningTime="2025-12-10 00:44:48.431265231 +0000 UTC m=+861.509222358" watchObservedRunningTime="2025-12-10 00:44:48.436597596 +0000 UTC m=+861.514554713" Dec 10 00:44:49 crc kubenswrapper[4884]: I1210 00:44:49.236713 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" Dec 10 00:44:49 crc kubenswrapper[4884]: I1210 00:44:49.261322 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-4pc64" podStartSLOduration=21.927336609 podStartE2EDuration="31.261296979s" podCreationTimestamp="2025-12-10 00:44:18 +0000 UTC" firstStartedPulling="2025-12-10 00:44:38.782593267 +0000 UTC m=+851.860550384" lastFinishedPulling="2025-12-10 00:44:48.116553627 +0000 UTC m=+861.194510754" observedRunningTime="2025-12-10 00:44:48.477584599 +0000 UTC m=+861.555541726" watchObservedRunningTime="2025-12-10 00:44:49.261296979 +0000 UTC m=+862.339254116" Dec 10 00:44:49 crc kubenswrapper[4884]: I1210 00:44:49.356543 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm" event={"ID":"ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8","Type":"ContainerStarted","Data":"8480342fd35da006e5cac0d39736463cb4de752f07fc770cc23baa1f811769c3"} Dec 10 00:44:49 crc kubenswrapper[4884]: I1210 00:44:49.387476 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-jvfjm" podStartSLOduration=18.872090695 podStartE2EDuration="31.387426879s" podCreationTimestamp="2025-12-10 00:44:18 +0000 UTC" firstStartedPulling="2025-12-10 00:44:35.551198502 +0000 UTC m=+848.629155629" lastFinishedPulling="2025-12-10 00:44:48.066534676 +0000 UTC m=+861.144491813" observedRunningTime="2025-12-10 00:44:49.382030883 +0000 UTC m=+862.459988010" watchObservedRunningTime="2025-12-10 00:44:49.387426879 +0000 UTC m=+862.465384006" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.086102 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mxs96"] Dec 10 00:44:58 crc kubenswrapper[4884]: E1210 00:44:58.087103 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" containerName="extract-utilities" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.087137 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" containerName="extract-utilities" Dec 10 00:44:58 crc kubenswrapper[4884]: E1210 00:44:58.087175 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" containerName="registry-server" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.087196 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" containerName="registry-server" Dec 10 00:44:58 crc kubenswrapper[4884]: E1210 00:44:58.087246 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" containerName="extract-content" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.087265 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" containerName="extract-content" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.087562 4884 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="9cff36d7-7f16-4fed-b966-9b4eb28d296b" containerName="registry-server" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.088592 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-mxs96" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.090891 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.091107 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.091163 4884 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-bskwm" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.104375 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mxs96"] Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.111311 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-h8t2q"] Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.112160 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-h8t2q" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.114370 4884 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-bplx8" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.129538 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-v52kp"] Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.130732 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-v52kp" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.132928 4884 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-5ddwp" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.140407 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-h8t2q"] Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.148668 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-v52kp"] Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.211290 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snvlb\" (UniqueName: \"kubernetes.io/projected/c9b24f3f-f93f-4514-8162-681c6b3bc0ad-kube-api-access-snvlb\") pod \"cert-manager-cainjector-7f985d654d-mxs96\" (UID: \"c9b24f3f-f93f-4514-8162-681c6b3bc0ad\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mxs96" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.211348 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjqpj\" (UniqueName: \"kubernetes.io/projected/f4fb928c-c33e-4b15-a9ac-a8936ddc4439-kube-api-access-mjqpj\") pod \"cert-manager-5b446d88c5-h8t2q\" (UID: \"f4fb928c-c33e-4b15-a9ac-a8936ddc4439\") " pod="cert-manager/cert-manager-5b446d88c5-h8t2q" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.313001 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkqtl\" (UniqueName: \"kubernetes.io/projected/6c33c936-cf88-4bce-91f1-bc463f39a9ef-kube-api-access-qkqtl\") pod \"cert-manager-webhook-5655c58dd6-v52kp\" (UID: \"6c33c936-cf88-4bce-91f1-bc463f39a9ef\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-v52kp" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.313424 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snvlb\" (UniqueName: \"kubernetes.io/projected/c9b24f3f-f93f-4514-8162-681c6b3bc0ad-kube-api-access-snvlb\") pod \"cert-manager-cainjector-7f985d654d-mxs96\" (UID: \"c9b24f3f-f93f-4514-8162-681c6b3bc0ad\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-mxs96" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.313481 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjqpj\" (UniqueName: \"kubernetes.io/projected/f4fb928c-c33e-4b15-a9ac-a8936ddc4439-kube-api-access-mjqpj\") pod \"cert-manager-5b446d88c5-h8t2q\" (UID: \"f4fb928c-c33e-4b15-a9ac-a8936ddc4439\") " pod="cert-manager/cert-manager-5b446d88c5-h8t2q" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.337503 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjqpj\" (UniqueName: \"kubernetes.io/projected/f4fb928c-c33e-4b15-a9ac-a8936ddc4439-kube-api-access-mjqpj\") pod \"cert-manager-5b446d88c5-h8t2q\" (UID: \"f4fb928c-c33e-4b15-a9ac-a8936ddc4439\") " pod="cert-manager/cert-manager-5b446d88c5-h8t2q" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.337557 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snvlb\" (UniqueName: \"kubernetes.io/projected/c9b24f3f-f93f-4514-8162-681c6b3bc0ad-kube-api-access-snvlb\") pod \"cert-manager-cainjector-7f985d654d-mxs96\" (UID: \"c9b24f3f-f93f-4514-8162-681c6b3bc0ad\") " 
pod="cert-manager/cert-manager-cainjector-7f985d654d-mxs96" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.415053 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkqtl\" (UniqueName: \"kubernetes.io/projected/6c33c936-cf88-4bce-91f1-bc463f39a9ef-kube-api-access-qkqtl\") pod \"cert-manager-webhook-5655c58dd6-v52kp\" (UID: \"6c33c936-cf88-4bce-91f1-bc463f39a9ef\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-v52kp" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.417353 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-mxs96" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.435829 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-h8t2q" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.448138 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkqtl\" (UniqueName: \"kubernetes.io/projected/6c33c936-cf88-4bce-91f1-bc463f39a9ef-kube-api-access-qkqtl\") pod \"cert-manager-webhook-5655c58dd6-v52kp\" (UID: \"6c33c936-cf88-4bce-91f1-bc463f39a9ef\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-v52kp" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.454880 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-v52kp" Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.716004 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-h8t2q"] Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.867073 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-v52kp"] Dec 10 00:44:58 crc kubenswrapper[4884]: I1210 00:44:58.884886 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-mxs96"] Dec 10 00:44:58 crc kubenswrapper[4884]: W1210 00:44:58.895203 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9b24f3f_f93f_4514_8162_681c6b3bc0ad.slice/crio-a4cdaa4e948577e6a010cadc7f4968a01b3e1033cbd86cd7c9eee78e1b5dab5b WatchSource:0}: Error finding container a4cdaa4e948577e6a010cadc7f4968a01b3e1033cbd86cd7c9eee78e1b5dab5b: Status 404 returned error can't find the container with id a4cdaa4e948577e6a010cadc7f4968a01b3e1033cbd86cd7c9eee78e1b5dab5b Dec 10 00:44:59 crc kubenswrapper[4884]: I1210 00:44:59.352515 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-qtpmp" Dec 10 00:44:59 crc kubenswrapper[4884]: I1210 00:44:59.419570 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-h8t2q" event={"ID":"f4fb928c-c33e-4b15-a9ac-a8936ddc4439","Type":"ContainerStarted","Data":"b68d04e7e37bf85bded3f0cc0ba7461b6e5131b03d9f16620e56e03ee6397f5a"} Dec 10 00:44:59 crc kubenswrapper[4884]: I1210 00:44:59.429201 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-v52kp" event={"ID":"6c33c936-cf88-4bce-91f1-bc463f39a9ef","Type":"ContainerStarted","Data":"15457189bd64ab5525e491f0902337cb20fe8e99829dae654e4aeef7bab244e6"} Dec 10 00:44:59 crc kubenswrapper[4884]: I1210 00:44:59.430294 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="cert-manager/cert-manager-cainjector-7f985d654d-mxs96" event={"ID":"c9b24f3f-f93f-4514-8162-681c6b3bc0ad","Type":"ContainerStarted","Data":"a4cdaa4e948577e6a010cadc7f4968a01b3e1033cbd86cd7c9eee78e1b5dab5b"} Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.183831 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw"] Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.184705 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.187154 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.187304 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.201429 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw"] Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.259191 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hjzb\" (UniqueName: \"kubernetes.io/projected/7ba8bede-46ca-4caf-be35-617a7c91ef20-kube-api-access-9hjzb\") pod \"collect-profiles-29422125-dqxhw\" (UID: \"7ba8bede-46ca-4caf-be35-617a7c91ef20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.259526 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ba8bede-46ca-4caf-be35-617a7c91ef20-config-volume\") pod \"collect-profiles-29422125-dqxhw\" (UID: \"7ba8bede-46ca-4caf-be35-617a7c91ef20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.259551 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ba8bede-46ca-4caf-be35-617a7c91ef20-secret-volume\") pod \"collect-profiles-29422125-dqxhw\" (UID: \"7ba8bede-46ca-4caf-be35-617a7c91ef20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.360683 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hjzb\" (UniqueName: \"kubernetes.io/projected/7ba8bede-46ca-4caf-be35-617a7c91ef20-kube-api-access-9hjzb\") pod \"collect-profiles-29422125-dqxhw\" (UID: \"7ba8bede-46ca-4caf-be35-617a7c91ef20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.360757 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ba8bede-46ca-4caf-be35-617a7c91ef20-config-volume\") pod \"collect-profiles-29422125-dqxhw\" (UID: \"7ba8bede-46ca-4caf-be35-617a7c91ef20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.360790 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ba8bede-46ca-4caf-be35-617a7c91ef20-secret-volume\") pod \"collect-profiles-29422125-dqxhw\" (UID: \"7ba8bede-46ca-4caf-be35-617a7c91ef20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.362015 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ba8bede-46ca-4caf-be35-617a7c91ef20-config-volume\") pod \"collect-profiles-29422125-dqxhw\" (UID: \"7ba8bede-46ca-4caf-be35-617a7c91ef20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.372040 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ba8bede-46ca-4caf-be35-617a7c91ef20-secret-volume\") pod \"collect-profiles-29422125-dqxhw\" (UID: \"7ba8bede-46ca-4caf-be35-617a7c91ef20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.379514 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hjzb\" (UniqueName: \"kubernetes.io/projected/7ba8bede-46ca-4caf-be35-617a7c91ef20-kube-api-access-9hjzb\") pod \"collect-profiles-29422125-dqxhw\" (UID: \"7ba8bede-46ca-4caf-be35-617a7c91ef20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:00 crc kubenswrapper[4884]: I1210 00:45:00.500066 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:03 crc kubenswrapper[4884]: I1210 00:45:03.141427 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw"] Dec 10 00:45:03 crc kubenswrapper[4884]: W1210 00:45:03.372471 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7ba8bede_46ca_4caf_be35_617a7c91ef20.slice/crio-8975d17697afc0bdd4c5813ead03bcd6bffa1e99fc17e9a94439d5e6507d5dac WatchSource:0}: Error finding container 8975d17697afc0bdd4c5813ead03bcd6bffa1e99fc17e9a94439d5e6507d5dac: Status 404 returned error can't find the container with id 8975d17697afc0bdd4c5813ead03bcd6bffa1e99fc17e9a94439d5e6507d5dac Dec 10 00:45:03 crc kubenswrapper[4884]: I1210 00:45:03.461831 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" event={"ID":"7ba8bede-46ca-4caf-be35-617a7c91ef20","Type":"ContainerStarted","Data":"8975d17697afc0bdd4c5813ead03bcd6bffa1e99fc17e9a94439d5e6507d5dac"} Dec 10 00:45:04 crc kubenswrapper[4884]: I1210 00:45:04.467738 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-h8t2q" event={"ID":"f4fb928c-c33e-4b15-a9ac-a8936ddc4439","Type":"ContainerStarted","Data":"a11a42cb46b2818ea8977f5282000f2029d453862bf48c4bbee7b0004b653cde"} Dec 10 00:45:04 crc kubenswrapper[4884]: I1210 00:45:04.469501 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-v52kp" event={"ID":"6c33c936-cf88-4bce-91f1-bc463f39a9ef","Type":"ContainerStarted","Data":"b030bad60d86072021c32d0c8a11eb85a4bfebd9268bb25b220670daa7d70889"} Dec 10 00:45:04 crc kubenswrapper[4884]: I1210 00:45:04.469615 4884 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-v52kp" Dec 10 00:45:04 crc kubenswrapper[4884]: I1210 00:45:04.471023 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-mxs96" event={"ID":"c9b24f3f-f93f-4514-8162-681c6b3bc0ad","Type":"ContainerStarted","Data":"14c4e72742ba33fd5abfbdb9759928d0e3fae48b5eef24c0595db9d32cb25a0c"} Dec 10 00:45:04 crc kubenswrapper[4884]: I1210 00:45:04.473305 4884 generic.go:334] "Generic (PLEG): container finished" podID="7ba8bede-46ca-4caf-be35-617a7c91ef20" containerID="5e4973a2393c8c1f289c722babb93f4ff22ce7feeb7d2b6fcce909afc097f39a" exitCode=0 Dec 10 00:45:04 crc kubenswrapper[4884]: I1210 00:45:04.473344 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" event={"ID":"7ba8bede-46ca-4caf-be35-617a7c91ef20","Type":"ContainerDied","Data":"5e4973a2393c8c1f289c722babb93f4ff22ce7feeb7d2b6fcce909afc097f39a"} Dec 10 00:45:04 crc kubenswrapper[4884]: I1210 00:45:04.486010 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-h8t2q" podStartSLOduration=1.680673267 podStartE2EDuration="6.485989396s" podCreationTimestamp="2025-12-10 00:44:58 +0000 UTC" firstStartedPulling="2025-12-10 00:44:58.722065697 +0000 UTC m=+871.800022814" lastFinishedPulling="2025-12-10 00:45:03.527381826 +0000 UTC m=+876.605338943" observedRunningTime="2025-12-10 00:45:04.483706418 +0000 UTC m=+877.561663535" watchObservedRunningTime="2025-12-10 00:45:04.485989396 +0000 UTC m=+877.563946523" Dec 10 00:45:04 crc kubenswrapper[4884]: I1210 00:45:04.515983 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-mxs96" podStartSLOduration=1.834468416 podStartE2EDuration="6.515966462s" podCreationTimestamp="2025-12-10 00:44:58 +0000 UTC" firstStartedPulling="2025-12-10 00:44:58.897338847 +0000 UTC m=+871.975295964" lastFinishedPulling="2025-12-10 00:45:03.578836893 +0000 UTC m=+876.656794010" observedRunningTime="2025-12-10 00:45:04.513881859 +0000 UTC m=+877.591838996" watchObservedRunningTime="2025-12-10 00:45:04.515966462 +0000 UTC m=+877.593923599" Dec 10 00:45:04 crc kubenswrapper[4884]: I1210 00:45:04.539164 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-v52kp" podStartSLOduration=1.8449464290000002 podStartE2EDuration="6.539146886s" podCreationTimestamp="2025-12-10 00:44:58 +0000 UTC" firstStartedPulling="2025-12-10 00:44:58.878396479 +0000 UTC m=+871.956353596" lastFinishedPulling="2025-12-10 00:45:03.572596936 +0000 UTC m=+876.650554053" observedRunningTime="2025-12-10 00:45:04.537187987 +0000 UTC m=+877.615145154" watchObservedRunningTime="2025-12-10 00:45:04.539146886 +0000 UTC m=+877.617103993" Dec 10 00:45:05 crc kubenswrapper[4884]: I1210 00:45:05.688162 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:05 crc kubenswrapper[4884]: I1210 00:45:05.834014 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ba8bede-46ca-4caf-be35-617a7c91ef20-config-volume\") pod \"7ba8bede-46ca-4caf-be35-617a7c91ef20\" (UID: \"7ba8bede-46ca-4caf-be35-617a7c91ef20\") " Dec 10 00:45:05 crc kubenswrapper[4884]: I1210 00:45:05.834162 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ba8bede-46ca-4caf-be35-617a7c91ef20-secret-volume\") pod \"7ba8bede-46ca-4caf-be35-617a7c91ef20\" (UID: \"7ba8bede-46ca-4caf-be35-617a7c91ef20\") " Dec 10 00:45:05 crc kubenswrapper[4884]: I1210 00:45:05.834281 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9hjzb\" (UniqueName: \"kubernetes.io/projected/7ba8bede-46ca-4caf-be35-617a7c91ef20-kube-api-access-9hjzb\") pod \"7ba8bede-46ca-4caf-be35-617a7c91ef20\" (UID: \"7ba8bede-46ca-4caf-be35-617a7c91ef20\") " Dec 10 00:45:05 crc kubenswrapper[4884]: I1210 00:45:05.834963 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ba8bede-46ca-4caf-be35-617a7c91ef20-config-volume" (OuterVolumeSpecName: "config-volume") pod "7ba8bede-46ca-4caf-be35-617a7c91ef20" (UID: "7ba8bede-46ca-4caf-be35-617a7c91ef20"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:45:05 crc kubenswrapper[4884]: I1210 00:45:05.843008 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ba8bede-46ca-4caf-be35-617a7c91ef20-kube-api-access-9hjzb" (OuterVolumeSpecName: "kube-api-access-9hjzb") pod "7ba8bede-46ca-4caf-be35-617a7c91ef20" (UID: "7ba8bede-46ca-4caf-be35-617a7c91ef20"). InnerVolumeSpecName "kube-api-access-9hjzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:45:05 crc kubenswrapper[4884]: I1210 00:45:05.844347 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ba8bede-46ca-4caf-be35-617a7c91ef20-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7ba8bede-46ca-4caf-be35-617a7c91ef20" (UID: "7ba8bede-46ca-4caf-be35-617a7c91ef20"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:45:05 crc kubenswrapper[4884]: I1210 00:45:05.936660 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ba8bede-46ca-4caf-be35-617a7c91ef20-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 00:45:05 crc kubenswrapper[4884]: I1210 00:45:05.936691 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9hjzb\" (UniqueName: \"kubernetes.io/projected/7ba8bede-46ca-4caf-be35-617a7c91ef20-kube-api-access-9hjzb\") on node \"crc\" DevicePath \"\"" Dec 10 00:45:05 crc kubenswrapper[4884]: I1210 00:45:05.936700 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ba8bede-46ca-4caf-be35-617a7c91ef20-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 00:45:06 crc kubenswrapper[4884]: I1210 00:45:06.488228 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" event={"ID":"7ba8bede-46ca-4caf-be35-617a7c91ef20","Type":"ContainerDied","Data":"8975d17697afc0bdd4c5813ead03bcd6bffa1e99fc17e9a94439d5e6507d5dac"} Dec 10 00:45:06 crc kubenswrapper[4884]: I1210 00:45:06.488272 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8975d17697afc0bdd4c5813ead03bcd6bffa1e99fc17e9a94439d5e6507d5dac" Dec 10 00:45:06 crc kubenswrapper[4884]: I1210 00:45:06.488301 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw" Dec 10 00:45:08 crc kubenswrapper[4884]: I1210 00:45:08.458999 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-v52kp" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.503839 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7"] Dec 10 00:45:37 crc kubenswrapper[4884]: E1210 00:45:37.504519 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ba8bede-46ca-4caf-be35-617a7c91ef20" containerName="collect-profiles" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.504530 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ba8bede-46ca-4caf-be35-617a7c91ef20" containerName="collect-profiles" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.504623 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ba8bede-46ca-4caf-be35-617a7c91ef20" containerName="collect-profiles" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.505611 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.507594 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.514303 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7"] Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.526137 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqd6s\" (UniqueName: \"kubernetes.io/projected/c514393e-bc0a-4da5-9b45-de376e94eef8-kube-api-access-wqd6s\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7\" (UID: \"c514393e-bc0a-4da5-9b45-de376e94eef8\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.526366 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c514393e-bc0a-4da5-9b45-de376e94eef8-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7\" (UID: \"c514393e-bc0a-4da5-9b45-de376e94eef8\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.526521 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c514393e-bc0a-4da5-9b45-de376e94eef8-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7\" (UID: \"c514393e-bc0a-4da5-9b45-de376e94eef8\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.627713 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqd6s\" (UniqueName: \"kubernetes.io/projected/c514393e-bc0a-4da5-9b45-de376e94eef8-kube-api-access-wqd6s\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7\" (UID: \"c514393e-bc0a-4da5-9b45-de376e94eef8\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.627998 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c514393e-bc0a-4da5-9b45-de376e94eef8-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7\" (UID: \"c514393e-bc0a-4da5-9b45-de376e94eef8\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.628163 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c514393e-bc0a-4da5-9b45-de376e94eef8-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7\" (UID: \"c514393e-bc0a-4da5-9b45-de376e94eef8\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.628793 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/c514393e-bc0a-4da5-9b45-de376e94eef8-bundle\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7\" (UID: \"c514393e-bc0a-4da5-9b45-de376e94eef8\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.628820 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c514393e-bc0a-4da5-9b45-de376e94eef8-util\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7\" (UID: \"c514393e-bc0a-4da5-9b45-de376e94eef8\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.650694 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqd6s\" (UniqueName: \"kubernetes.io/projected/c514393e-bc0a-4da5-9b45-de376e94eef8-kube-api-access-wqd6s\") pod \"4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7\" (UID: \"c514393e-bc0a-4da5-9b45-de376e94eef8\") " pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.708915 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l"] Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.710931 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.725511 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l"] Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.822589 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.830583 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/37f7e3ef-f9a6-4975-ae21-bee745d3063d-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l\" (UID: \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.830662 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/37f7e3ef-f9a6-4975-ae21-bee745d3063d-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l\" (UID: \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.830721 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvntb\" (UniqueName: \"kubernetes.io/projected/37f7e3ef-f9a6-4975-ae21-bee745d3063d-kube-api-access-qvntb\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l\" (UID: \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.931500 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/37f7e3ef-f9a6-4975-ae21-bee745d3063d-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l\" (UID: \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.932024 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/37f7e3ef-f9a6-4975-ae21-bee745d3063d-bundle\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l\" (UID: \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.932457 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/37f7e3ef-f9a6-4975-ae21-bee745d3063d-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l\" (UID: \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.932637 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvntb\" (UniqueName: \"kubernetes.io/projected/37f7e3ef-f9a6-4975-ae21-bee745d3063d-kube-api-access-qvntb\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l\" (UID: \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.932962 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" 
(UniqueName: \"kubernetes.io/empty-dir/37f7e3ef-f9a6-4975-ae21-bee745d3063d-util\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l\" (UID: \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:37 crc kubenswrapper[4884]: I1210 00:45:37.955780 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvntb\" (UniqueName: \"kubernetes.io/projected/37f7e3ef-f9a6-4975-ae21-bee745d3063d-kube-api-access-qvntb\") pod \"a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l\" (UID: \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\") " pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:38 crc kubenswrapper[4884]: I1210 00:45:38.039716 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:38 crc kubenswrapper[4884]: I1210 00:45:38.104834 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7"] Dec 10 00:45:38 crc kubenswrapper[4884]: I1210 00:45:38.235172 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l"] Dec 10 00:45:38 crc kubenswrapper[4884]: W1210 00:45:38.239622 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37f7e3ef_f9a6_4975_ae21_bee745d3063d.slice/crio-5f8e799ce5fc2a319f877f539b01b5a6dbd78a0bf62e0fba89bfe1b92ad10e8b WatchSource:0}: Error finding container 5f8e799ce5fc2a319f877f539b01b5a6dbd78a0bf62e0fba89bfe1b92ad10e8b: Status 404 returned error can't find the container with id 5f8e799ce5fc2a319f877f539b01b5a6dbd78a0bf62e0fba89bfe1b92ad10e8b Dec 10 00:45:38 crc kubenswrapper[4884]: I1210 00:45:38.711370 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" event={"ID":"c514393e-bc0a-4da5-9b45-de376e94eef8","Type":"ContainerStarted","Data":"3a4f06fbf652f4496abab2f7690ef29526ae1218d54b8e13d9072d279af7526b"} Dec 10 00:45:38 crc kubenswrapper[4884]: I1210 00:45:38.711421 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" event={"ID":"c514393e-bc0a-4da5-9b45-de376e94eef8","Type":"ContainerStarted","Data":"baeac411a9ad70d0f25dca9fe92eb340d4859173f7e47b4d69571914b7cdfcc4"} Dec 10 00:45:38 crc kubenswrapper[4884]: I1210 00:45:38.715146 4884 generic.go:334] "Generic (PLEG): container finished" podID="37f7e3ef-f9a6-4975-ae21-bee745d3063d" containerID="258fd5799cf408b1357ebd617afa4b0055620801148885b3cd976b5191a10e31" exitCode=0 Dec 10 00:45:38 crc kubenswrapper[4884]: I1210 00:45:38.715195 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" event={"ID":"37f7e3ef-f9a6-4975-ae21-bee745d3063d","Type":"ContainerDied","Data":"258fd5799cf408b1357ebd617afa4b0055620801148885b3cd976b5191a10e31"} Dec 10 00:45:38 crc kubenswrapper[4884]: I1210 00:45:38.715216 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" 
event={"ID":"37f7e3ef-f9a6-4975-ae21-bee745d3063d","Type":"ContainerStarted","Data":"5f8e799ce5fc2a319f877f539b01b5a6dbd78a0bf62e0fba89bfe1b92ad10e8b"} Dec 10 00:45:39 crc kubenswrapper[4884]: I1210 00:45:39.722412 4884 generic.go:334] "Generic (PLEG): container finished" podID="c514393e-bc0a-4da5-9b45-de376e94eef8" containerID="3a4f06fbf652f4496abab2f7690ef29526ae1218d54b8e13d9072d279af7526b" exitCode=0 Dec 10 00:45:39 crc kubenswrapper[4884]: I1210 00:45:39.722733 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" event={"ID":"c514393e-bc0a-4da5-9b45-de376e94eef8","Type":"ContainerDied","Data":"3a4f06fbf652f4496abab2f7690ef29526ae1218d54b8e13d9072d279af7526b"} Dec 10 00:45:40 crc kubenswrapper[4884]: I1210 00:45:40.739137 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" event={"ID":"37f7e3ef-f9a6-4975-ae21-bee745d3063d","Type":"ContainerStarted","Data":"be9acc11577958c672b00aeaa3c6ce247be36951ef4137186ca0a302281bcd21"} Dec 10 00:45:41 crc kubenswrapper[4884]: I1210 00:45:41.747028 4884 generic.go:334] "Generic (PLEG): container finished" podID="37f7e3ef-f9a6-4975-ae21-bee745d3063d" containerID="be9acc11577958c672b00aeaa3c6ce247be36951ef4137186ca0a302281bcd21" exitCode=0 Dec 10 00:45:41 crc kubenswrapper[4884]: I1210 00:45:41.747096 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" event={"ID":"37f7e3ef-f9a6-4975-ae21-bee745d3063d","Type":"ContainerDied","Data":"be9acc11577958c672b00aeaa3c6ce247be36951ef4137186ca0a302281bcd21"} Dec 10 00:45:41 crc kubenswrapper[4884]: I1210 00:45:41.749762 4884 generic.go:334] "Generic (PLEG): container finished" podID="c514393e-bc0a-4da5-9b45-de376e94eef8" containerID="cb2e5337827dc41ca06194e39ae3853086a440296a956b6bab96cab7506ebde3" exitCode=0 Dec 10 00:45:41 crc kubenswrapper[4884]: I1210 00:45:41.749807 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" event={"ID":"c514393e-bc0a-4da5-9b45-de376e94eef8","Type":"ContainerDied","Data":"cb2e5337827dc41ca06194e39ae3853086a440296a956b6bab96cab7506ebde3"} Dec 10 00:45:42 crc kubenswrapper[4884]: I1210 00:45:42.758048 4884 generic.go:334] "Generic (PLEG): container finished" podID="c514393e-bc0a-4da5-9b45-de376e94eef8" containerID="95952382b7442ee6cd9670d6ffecd0e85261b40ef9e2cc4dbb4ec758a117d8af" exitCode=0 Dec 10 00:45:42 crc kubenswrapper[4884]: I1210 00:45:42.758129 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" event={"ID":"c514393e-bc0a-4da5-9b45-de376e94eef8","Type":"ContainerDied","Data":"95952382b7442ee6cd9670d6ffecd0e85261b40ef9e2cc4dbb4ec758a117d8af"} Dec 10 00:45:42 crc kubenswrapper[4884]: I1210 00:45:42.759977 4884 generic.go:334] "Generic (PLEG): container finished" podID="37f7e3ef-f9a6-4975-ae21-bee745d3063d" containerID="8598540a5a91799966d3b30f0842477a6437a698168d10759fe4e964783c4b53" exitCode=0 Dec 10 00:45:42 crc kubenswrapper[4884]: I1210 00:45:42.760012 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" 
event={"ID":"37f7e3ef-f9a6-4975-ae21-bee745d3063d","Type":"ContainerDied","Data":"8598540a5a91799966d3b30f0842477a6437a698168d10759fe4e964783c4b53"} Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.080407 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.086152 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.113585 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qvntb\" (UniqueName: \"kubernetes.io/projected/37f7e3ef-f9a6-4975-ae21-bee745d3063d-kube-api-access-qvntb\") pod \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\" (UID: \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\") " Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.113719 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/37f7e3ef-f9a6-4975-ae21-bee745d3063d-bundle\") pod \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\" (UID: \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\") " Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.113750 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c514393e-bc0a-4da5-9b45-de376e94eef8-bundle\") pod \"c514393e-bc0a-4da5-9b45-de376e94eef8\" (UID: \"c514393e-bc0a-4da5-9b45-de376e94eef8\") " Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.113791 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wqd6s\" (UniqueName: \"kubernetes.io/projected/c514393e-bc0a-4da5-9b45-de376e94eef8-kube-api-access-wqd6s\") pod \"c514393e-bc0a-4da5-9b45-de376e94eef8\" (UID: \"c514393e-bc0a-4da5-9b45-de376e94eef8\") " Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.113827 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/37f7e3ef-f9a6-4975-ae21-bee745d3063d-util\") pod \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\" (UID: \"37f7e3ef-f9a6-4975-ae21-bee745d3063d\") " Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.113852 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c514393e-bc0a-4da5-9b45-de376e94eef8-util\") pod \"c514393e-bc0a-4da5-9b45-de376e94eef8\" (UID: \"c514393e-bc0a-4da5-9b45-de376e94eef8\") " Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.114809 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c514393e-bc0a-4da5-9b45-de376e94eef8-bundle" (OuterVolumeSpecName: "bundle") pod "c514393e-bc0a-4da5-9b45-de376e94eef8" (UID: "c514393e-bc0a-4da5-9b45-de376e94eef8"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.114914 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37f7e3ef-f9a6-4975-ae21-bee745d3063d-bundle" (OuterVolumeSpecName: "bundle") pod "37f7e3ef-f9a6-4975-ae21-bee745d3063d" (UID: "37f7e3ef-f9a6-4975-ae21-bee745d3063d"). InnerVolumeSpecName "bundle". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.119295 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c514393e-bc0a-4da5-9b45-de376e94eef8-kube-api-access-wqd6s" (OuterVolumeSpecName: "kube-api-access-wqd6s") pod "c514393e-bc0a-4da5-9b45-de376e94eef8" (UID: "c514393e-bc0a-4da5-9b45-de376e94eef8"). InnerVolumeSpecName "kube-api-access-wqd6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.122386 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37f7e3ef-f9a6-4975-ae21-bee745d3063d-kube-api-access-qvntb" (OuterVolumeSpecName: "kube-api-access-qvntb") pod "37f7e3ef-f9a6-4975-ae21-bee745d3063d" (UID: "37f7e3ef-f9a6-4975-ae21-bee745d3063d"). InnerVolumeSpecName "kube-api-access-qvntb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.215844 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qvntb\" (UniqueName: \"kubernetes.io/projected/37f7e3ef-f9a6-4975-ae21-bee745d3063d-kube-api-access-qvntb\") on node \"crc\" DevicePath \"\"" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.215890 4884 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/37f7e3ef-f9a6-4975-ae21-bee745d3063d-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.215906 4884 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c514393e-bc0a-4da5-9b45-de376e94eef8-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.215918 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wqd6s\" (UniqueName: \"kubernetes.io/projected/c514393e-bc0a-4da5-9b45-de376e94eef8-kube-api-access-wqd6s\") on node \"crc\" DevicePath \"\"" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.261944 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k554l"] Dec 10 00:45:44 crc kubenswrapper[4884]: E1210 00:45:44.262209 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c514393e-bc0a-4da5-9b45-de376e94eef8" containerName="pull" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.262236 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c514393e-bc0a-4da5-9b45-de376e94eef8" containerName="pull" Dec 10 00:45:44 crc kubenswrapper[4884]: E1210 00:45:44.262253 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c514393e-bc0a-4da5-9b45-de376e94eef8" containerName="util" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.262261 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c514393e-bc0a-4da5-9b45-de376e94eef8" containerName="util" Dec 10 00:45:44 crc kubenswrapper[4884]: E1210 00:45:44.262273 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37f7e3ef-f9a6-4975-ae21-bee745d3063d" containerName="util" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.262280 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="37f7e3ef-f9a6-4975-ae21-bee745d3063d" containerName="util" Dec 10 00:45:44 crc kubenswrapper[4884]: E1210 00:45:44.262291 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37f7e3ef-f9a6-4975-ae21-bee745d3063d" containerName="extract" Dec 10 
00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.262299 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="37f7e3ef-f9a6-4975-ae21-bee745d3063d" containerName="extract" Dec 10 00:45:44 crc kubenswrapper[4884]: E1210 00:45:44.262311 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37f7e3ef-f9a6-4975-ae21-bee745d3063d" containerName="pull" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.262318 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="37f7e3ef-f9a6-4975-ae21-bee745d3063d" containerName="pull" Dec 10 00:45:44 crc kubenswrapper[4884]: E1210 00:45:44.262329 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c514393e-bc0a-4da5-9b45-de376e94eef8" containerName="extract" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.262351 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c514393e-bc0a-4da5-9b45-de376e94eef8" containerName="extract" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.262470 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c514393e-bc0a-4da5-9b45-de376e94eef8" containerName="extract" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.262489 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="37f7e3ef-f9a6-4975-ae21-bee745d3063d" containerName="extract" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.263234 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.275470 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k554l"] Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.316702 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69afdd2b-638d-40e7-8618-b662d4770bec-utilities\") pod \"certified-operators-k554l\" (UID: \"69afdd2b-638d-40e7-8618-b662d4770bec\") " pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.316803 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69afdd2b-638d-40e7-8618-b662d4770bec-catalog-content\") pod \"certified-operators-k554l\" (UID: \"69afdd2b-638d-40e7-8618-b662d4770bec\") " pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.316835 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7cpf\" (UniqueName: \"kubernetes.io/projected/69afdd2b-638d-40e7-8618-b662d4770bec-kube-api-access-c7cpf\") pod \"certified-operators-k554l\" (UID: \"69afdd2b-638d-40e7-8618-b662d4770bec\") " pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.418259 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69afdd2b-638d-40e7-8618-b662d4770bec-utilities\") pod \"certified-operators-k554l\" (UID: \"69afdd2b-638d-40e7-8618-b662d4770bec\") " pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.418369 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/69afdd2b-638d-40e7-8618-b662d4770bec-catalog-content\") pod \"certified-operators-k554l\" (UID: \"69afdd2b-638d-40e7-8618-b662d4770bec\") " pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.418402 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7cpf\" (UniqueName: \"kubernetes.io/projected/69afdd2b-638d-40e7-8618-b662d4770bec-kube-api-access-c7cpf\") pod \"certified-operators-k554l\" (UID: \"69afdd2b-638d-40e7-8618-b662d4770bec\") " pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.418855 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69afdd2b-638d-40e7-8618-b662d4770bec-utilities\") pod \"certified-operators-k554l\" (UID: \"69afdd2b-638d-40e7-8618-b662d4770bec\") " pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.418935 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69afdd2b-638d-40e7-8618-b662d4770bec-catalog-content\") pod \"certified-operators-k554l\" (UID: \"69afdd2b-638d-40e7-8618-b662d4770bec\") " pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.443442 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7cpf\" (UniqueName: \"kubernetes.io/projected/69afdd2b-638d-40e7-8618-b662d4770bec-kube-api-access-c7cpf\") pod \"certified-operators-k554l\" (UID: \"69afdd2b-638d-40e7-8618-b662d4770bec\") " pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.505653 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c514393e-bc0a-4da5-9b45-de376e94eef8-util" (OuterVolumeSpecName: "util") pod "c514393e-bc0a-4da5-9b45-de376e94eef8" (UID: "c514393e-bc0a-4da5-9b45-de376e94eef8"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.507625 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37f7e3ef-f9a6-4975-ae21-bee745d3063d-util" (OuterVolumeSpecName: "util") pod "37f7e3ef-f9a6-4975-ae21-bee745d3063d" (UID: "37f7e3ef-f9a6-4975-ae21-bee745d3063d"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.519443 4884 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/37f7e3ef-f9a6-4975-ae21-bee745d3063d-util\") on node \"crc\" DevicePath \"\"" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.519485 4884 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c514393e-bc0a-4da5-9b45-de376e94eef8-util\") on node \"crc\" DevicePath \"\"" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.579662 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.774764 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" event={"ID":"c514393e-bc0a-4da5-9b45-de376e94eef8","Type":"ContainerDied","Data":"baeac411a9ad70d0f25dca9fe92eb340d4859173f7e47b4d69571914b7cdfcc4"} Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.774823 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="baeac411a9ad70d0f25dca9fe92eb340d4859173f7e47b4d69571914b7cdfcc4" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.774898 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.804811 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" event={"ID":"37f7e3ef-f9a6-4975-ae21-bee745d3063d","Type":"ContainerDied","Data":"5f8e799ce5fc2a319f877f539b01b5a6dbd78a0bf62e0fba89bfe1b92ad10e8b"} Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.804844 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f8e799ce5fc2a319f877f539b01b5a6dbd78a0bf62e0fba89bfe1b92ad10e8b" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.804902 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l" Dec 10 00:45:44 crc kubenswrapper[4884]: I1210 00:45:44.827724 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k554l"] Dec 10 00:45:44 crc kubenswrapper[4884]: W1210 00:45:44.840004 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69afdd2b_638d_40e7_8618_b662d4770bec.slice/crio-4dda4a9846ee3e71019d347d173cde3c2452855c985e8ba55d725a316ea63ecb WatchSource:0}: Error finding container 4dda4a9846ee3e71019d347d173cde3c2452855c985e8ba55d725a316ea63ecb: Status 404 returned error can't find the container with id 4dda4a9846ee3e71019d347d173cde3c2452855c985e8ba55d725a316ea63ecb Dec 10 00:45:45 crc kubenswrapper[4884]: I1210 00:45:45.813497 4884 generic.go:334] "Generic (PLEG): container finished" podID="69afdd2b-638d-40e7-8618-b662d4770bec" containerID="8edd877a6df61f89162901315bb41f140784d89a90bade8a2f60fa6206158765" exitCode=0 Dec 10 00:45:45 crc kubenswrapper[4884]: I1210 00:45:45.813605 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k554l" event={"ID":"69afdd2b-638d-40e7-8618-b662d4770bec","Type":"ContainerDied","Data":"8edd877a6df61f89162901315bb41f140784d89a90bade8a2f60fa6206158765"} Dec 10 00:45:45 crc kubenswrapper[4884]: I1210 00:45:45.813918 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k554l" event={"ID":"69afdd2b-638d-40e7-8618-b662d4770bec","Type":"ContainerStarted","Data":"4dda4a9846ee3e71019d347d173cde3c2452855c985e8ba55d725a316ea63ecb"} Dec 10 00:45:46 crc kubenswrapper[4884]: I1210 00:45:46.820975 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k554l" 
event={"ID":"69afdd2b-638d-40e7-8618-b662d4770bec","Type":"ContainerStarted","Data":"07a29596fe7cab12dc24b98af06d7b92d3eab3a60eae4f76a1613e32010ed85d"} Dec 10 00:45:47 crc kubenswrapper[4884]: I1210 00:45:47.829881 4884 generic.go:334] "Generic (PLEG): container finished" podID="69afdd2b-638d-40e7-8618-b662d4770bec" containerID="07a29596fe7cab12dc24b98af06d7b92d3eab3a60eae4f76a1613e32010ed85d" exitCode=0 Dec 10 00:45:47 crc kubenswrapper[4884]: I1210 00:45:47.829945 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k554l" event={"ID":"69afdd2b-638d-40e7-8618-b662d4770bec","Type":"ContainerDied","Data":"07a29596fe7cab12dc24b98af06d7b92d3eab3a60eae4f76a1613e32010ed85d"} Dec 10 00:45:48 crc kubenswrapper[4884]: I1210 00:45:48.098414 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:45:48 crc kubenswrapper[4884]: I1210 00:45:48.098493 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:45:48 crc kubenswrapper[4884]: I1210 00:45:48.400755 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-w84c2"] Dec 10 00:45:48 crc kubenswrapper[4884]: I1210 00:45:48.401760 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-w84c2" Dec 10 00:45:48 crc kubenswrapper[4884]: W1210 00:45:48.403065 4884 reflector.go:561] object-"openshift-logging"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-logging": no relationship found between node 'crc' and this object Dec 10 00:45:48 crc kubenswrapper[4884]: E1210 00:45:48.403116 4884 reflector.go:158] "Unhandled Error" err="object-\"openshift-logging\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-logging\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 10 00:45:48 crc kubenswrapper[4884]: I1210 00:45:48.403608 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"kube-root-ca.crt" Dec 10 00:45:48 crc kubenswrapper[4884]: I1210 00:45:48.404476 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"cluster-logging-operator-dockercfg-4n8z5" Dec 10 00:45:48 crc kubenswrapper[4884]: I1210 00:45:48.419417 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-w84c2"] Dec 10 00:45:48 crc kubenswrapper[4884]: I1210 00:45:48.474737 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8xsw\" (UniqueName: 
\"kubernetes.io/projected/3baeade4-d934-423b-be72-b94c23e737ba-kube-api-access-t8xsw\") pod \"cluster-logging-operator-ff9846bd-w84c2\" (UID: \"3baeade4-d934-423b-be72-b94c23e737ba\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-w84c2" Dec 10 00:45:48 crc kubenswrapper[4884]: I1210 00:45:48.575847 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8xsw\" (UniqueName: \"kubernetes.io/projected/3baeade4-d934-423b-be72-b94c23e737ba-kube-api-access-t8xsw\") pod \"cluster-logging-operator-ff9846bd-w84c2\" (UID: \"3baeade4-d934-423b-be72-b94c23e737ba\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-w84c2" Dec 10 00:45:48 crc kubenswrapper[4884]: I1210 00:45:48.839683 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k554l" event={"ID":"69afdd2b-638d-40e7-8618-b662d4770bec","Type":"ContainerStarted","Data":"37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6"} Dec 10 00:45:48 crc kubenswrapper[4884]: I1210 00:45:48.858213 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k554l" podStartSLOduration=2.424565499 podStartE2EDuration="4.858195193s" podCreationTimestamp="2025-12-10 00:45:44 +0000 UTC" firstStartedPulling="2025-12-10 00:45:45.816678059 +0000 UTC m=+918.894635196" lastFinishedPulling="2025-12-10 00:45:48.250307763 +0000 UTC m=+921.328264890" observedRunningTime="2025-12-10 00:45:48.855296998 +0000 UTC m=+921.933254135" watchObservedRunningTime="2025-12-10 00:45:48.858195193 +0000 UTC m=+921.936152310" Dec 10 00:45:49 crc kubenswrapper[4884]: E1210 00:45:49.593120 4884 projected.go:288] Couldn't get configMap openshift-logging/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Dec 10 00:45:49 crc kubenswrapper[4884]: E1210 00:45:49.593167 4884 projected.go:194] Error preparing data for projected volume kube-api-access-t8xsw for pod openshift-logging/cluster-logging-operator-ff9846bd-w84c2: failed to sync configmap cache: timed out waiting for the condition Dec 10 00:45:49 crc kubenswrapper[4884]: E1210 00:45:49.593241 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3baeade4-d934-423b-be72-b94c23e737ba-kube-api-access-t8xsw podName:3baeade4-d934-423b-be72-b94c23e737ba nodeName:}" failed. No retries permitted until 2025-12-10 00:45:50.093218347 +0000 UTC m=+923.171175464 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-t8xsw" (UniqueName: "kubernetes.io/projected/3baeade4-d934-423b-be72-b94c23e737ba-kube-api-access-t8xsw") pod "cluster-logging-operator-ff9846bd-w84c2" (UID: "3baeade4-d934-423b-be72-b94c23e737ba") : failed to sync configmap cache: timed out waiting for the condition Dec 10 00:45:49 crc kubenswrapper[4884]: I1210 00:45:49.873501 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"openshift-service-ca.crt" Dec 10 00:45:50 crc kubenswrapper[4884]: I1210 00:45:50.093477 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8xsw\" (UniqueName: \"kubernetes.io/projected/3baeade4-d934-423b-be72-b94c23e737ba-kube-api-access-t8xsw\") pod \"cluster-logging-operator-ff9846bd-w84c2\" (UID: \"3baeade4-d934-423b-be72-b94c23e737ba\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-w84c2" Dec 10 00:45:50 crc kubenswrapper[4884]: I1210 00:45:50.103705 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8xsw\" (UniqueName: \"kubernetes.io/projected/3baeade4-d934-423b-be72-b94c23e737ba-kube-api-access-t8xsw\") pod \"cluster-logging-operator-ff9846bd-w84c2\" (UID: \"3baeade4-d934-423b-be72-b94c23e737ba\") " pod="openshift-logging/cluster-logging-operator-ff9846bd-w84c2" Dec 10 00:45:50 crc kubenswrapper[4884]: I1210 00:45:50.216265 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/cluster-logging-operator-ff9846bd-w84c2" Dec 10 00:45:50 crc kubenswrapper[4884]: I1210 00:45:50.458206 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/cluster-logging-operator-ff9846bd-w84c2"] Dec 10 00:45:50 crc kubenswrapper[4884]: I1210 00:45:50.853738 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-w84c2" event={"ID":"3baeade4-d934-423b-be72-b94c23e737ba","Type":"ContainerStarted","Data":"43acdc6a4897fe080eb41e5b4ea96ae112fa7951089285c37f6ebfa6b3c29991"} Dec 10 00:45:54 crc kubenswrapper[4884]: I1210 00:45:54.580719 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:54 crc kubenswrapper[4884]: I1210 00:45:54.580789 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:54 crc kubenswrapper[4884]: I1210 00:45:54.632499 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:54 crc kubenswrapper[4884]: I1210 00:45:54.919779 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.456909 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k554l"] Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.457700 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k554l" podUID="69afdd2b-638d-40e7-8618-b662d4770bec" containerName="registry-server" containerID="cri-o://37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6" gracePeriod=2 Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.690521 4884 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc"] Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.691665 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.693770 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-metrics" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.693908 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-service-cert" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.694073 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"openshift-service-ca.crt" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.694452 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"loki-operator-manager-config" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.695674 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators-redhat"/"loki-operator-controller-manager-dockercfg-l58wg" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.695808 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators-redhat"/"kube-root-ca.crt" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.708305 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc"] Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.841076 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0495f56c-e0ff-4bb7-861c-21754379af3f-webhook-cert\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.841172 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0495f56c-e0ff-4bb7-861c-21754379af3f-apiservice-cert\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.841236 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4vxk\" (UniqueName: \"kubernetes.io/projected/0495f56c-e0ff-4bb7-861c-21754379af3f-kube-api-access-n4vxk\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.841307 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0495f56c-e0ff-4bb7-861c-21754379af3f-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " 
pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.841336 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/0495f56c-e0ff-4bb7-861c-21754379af3f-manager-config\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.902067 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/cluster-logging-operator-ff9846bd-w84c2" event={"ID":"3baeade4-d934-423b-be72-b94c23e737ba","Type":"ContainerStarted","Data":"ffd367ed1d5ffd594401fc2406711fee2522ba5d5d4738ffe478f55c36487650"} Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.942865 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0495f56c-e0ff-4bb7-861c-21754379af3f-apiservice-cert\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.942930 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4vxk\" (UniqueName: \"kubernetes.io/projected/0495f56c-e0ff-4bb7-861c-21754379af3f-kube-api-access-n4vxk\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.942967 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0495f56c-e0ff-4bb7-861c-21754379af3f-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.942984 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/0495f56c-e0ff-4bb7-861c-21754379af3f-manager-config\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.943015 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0495f56c-e0ff-4bb7-861c-21754379af3f-webhook-cert\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.944019 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manager-config\" (UniqueName: \"kubernetes.io/configmap/0495f56c-e0ff-4bb7-861c-21754379af3f-manager-config\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " 
pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.950220 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0495f56c-e0ff-4bb7-861c-21754379af3f-webhook-cert\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.952009 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0495f56c-e0ff-4bb7-861c-21754379af3f-apiservice-cert\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.954471 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"loki-operator-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0495f56c-e0ff-4bb7-861c-21754379af3f-loki-operator-metrics-cert\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:57 crc kubenswrapper[4884]: I1210 00:45:57.977205 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4vxk\" (UniqueName: \"kubernetes.io/projected/0495f56c-e0ff-4bb7-861c-21754379af3f-kube-api-access-n4vxk\") pod \"loki-operator-controller-manager-887bbc66c-sjvbc\" (UID: \"0495f56c-e0ff-4bb7-861c-21754379af3f\") " pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.005360 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.373923 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc"] Dec 10 00:45:58 crc kubenswrapper[4884]: W1210 00:45:58.398284 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0495f56c_e0ff_4bb7_861c_21754379af3f.slice/crio-bf6fb82ee8c656c8be3fa625c1a28771deb3b0ec62e653efaff489318eed1c7c WatchSource:0}: Error finding container bf6fb82ee8c656c8be3fa625c1a28771deb3b0ec62e653efaff489318eed1c7c: Status 404 returned error can't find the container with id bf6fb82ee8c656c8be3fa625c1a28771deb3b0ec62e653efaff489318eed1c7c Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.411345 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.586332 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7cpf\" (UniqueName: \"kubernetes.io/projected/69afdd2b-638d-40e7-8618-b662d4770bec-kube-api-access-c7cpf\") pod \"69afdd2b-638d-40e7-8618-b662d4770bec\" (UID: \"69afdd2b-638d-40e7-8618-b662d4770bec\") " Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.586389 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69afdd2b-638d-40e7-8618-b662d4770bec-utilities\") pod \"69afdd2b-638d-40e7-8618-b662d4770bec\" (UID: \"69afdd2b-638d-40e7-8618-b662d4770bec\") " Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.586460 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69afdd2b-638d-40e7-8618-b662d4770bec-catalog-content\") pod \"69afdd2b-638d-40e7-8618-b662d4770bec\" (UID: \"69afdd2b-638d-40e7-8618-b662d4770bec\") " Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.587929 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69afdd2b-638d-40e7-8618-b662d4770bec-utilities" (OuterVolumeSpecName: "utilities") pod "69afdd2b-638d-40e7-8618-b662d4770bec" (UID: "69afdd2b-638d-40e7-8618-b662d4770bec"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.595600 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69afdd2b-638d-40e7-8618-b662d4770bec-kube-api-access-c7cpf" (OuterVolumeSpecName: "kube-api-access-c7cpf") pod "69afdd2b-638d-40e7-8618-b662d4770bec" (UID: "69afdd2b-638d-40e7-8618-b662d4770bec"). InnerVolumeSpecName "kube-api-access-c7cpf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.632323 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69afdd2b-638d-40e7-8618-b662d4770bec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "69afdd2b-638d-40e7-8618-b662d4770bec" (UID: "69afdd2b-638d-40e7-8618-b662d4770bec"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.688388 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7cpf\" (UniqueName: \"kubernetes.io/projected/69afdd2b-638d-40e7-8618-b662d4770bec-kube-api-access-c7cpf\") on node \"crc\" DevicePath \"\"" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.688475 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69afdd2b-638d-40e7-8618-b662d4770bec-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.688493 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69afdd2b-638d-40e7-8618-b662d4770bec-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.909958 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" event={"ID":"0495f56c-e0ff-4bb7-861c-21754379af3f","Type":"ContainerStarted","Data":"bf6fb82ee8c656c8be3fa625c1a28771deb3b0ec62e653efaff489318eed1c7c"} Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.913155 4884 generic.go:334] "Generic (PLEG): container finished" podID="69afdd2b-638d-40e7-8618-b662d4770bec" containerID="37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6" exitCode=0 Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.913272 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k554l" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.913274 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k554l" event={"ID":"69afdd2b-638d-40e7-8618-b662d4770bec","Type":"ContainerDied","Data":"37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6"} Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.913526 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k554l" event={"ID":"69afdd2b-638d-40e7-8618-b662d4770bec","Type":"ContainerDied","Data":"4dda4a9846ee3e71019d347d173cde3c2452855c985e8ba55d725a316ea63ecb"} Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.913568 4884 scope.go:117] "RemoveContainer" containerID="37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.933653 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/cluster-logging-operator-ff9846bd-w84c2" podStartSLOduration=3.991674515 podStartE2EDuration="10.933633398s" podCreationTimestamp="2025-12-10 00:45:48 +0000 UTC" firstStartedPulling="2025-12-10 00:45:50.471223785 +0000 UTC m=+923.549180912" lastFinishedPulling="2025-12-10 00:45:57.413182678 +0000 UTC m=+930.491139795" observedRunningTime="2025-12-10 00:45:58.928607037 +0000 UTC m=+932.006564164" watchObservedRunningTime="2025-12-10 00:45:58.933633398 +0000 UTC m=+932.011590515" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.933994 4884 scope.go:117] "RemoveContainer" containerID="07a29596fe7cab12dc24b98af06d7b92d3eab3a60eae4f76a1613e32010ed85d" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.954674 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k554l"] Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.957620 
4884 scope.go:117] "RemoveContainer" containerID="8edd877a6df61f89162901315bb41f140784d89a90bade8a2f60fa6206158765" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.960207 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k554l"] Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.970606 4884 scope.go:117] "RemoveContainer" containerID="37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6" Dec 10 00:45:58 crc kubenswrapper[4884]: E1210 00:45:58.970964 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6\": container with ID starting with 37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6 not found: ID does not exist" containerID="37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.971000 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6"} err="failed to get container status \"37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6\": rpc error: code = NotFound desc = could not find container \"37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6\": container with ID starting with 37ac1c2defa9c03ed0287c8042fb6d81a0e570beb7569fc110f7d78b695613d6 not found: ID does not exist" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.971026 4884 scope.go:117] "RemoveContainer" containerID="07a29596fe7cab12dc24b98af06d7b92d3eab3a60eae4f76a1613e32010ed85d" Dec 10 00:45:58 crc kubenswrapper[4884]: E1210 00:45:58.971357 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07a29596fe7cab12dc24b98af06d7b92d3eab3a60eae4f76a1613e32010ed85d\": container with ID starting with 07a29596fe7cab12dc24b98af06d7b92d3eab3a60eae4f76a1613e32010ed85d not found: ID does not exist" containerID="07a29596fe7cab12dc24b98af06d7b92d3eab3a60eae4f76a1613e32010ed85d" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.971392 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07a29596fe7cab12dc24b98af06d7b92d3eab3a60eae4f76a1613e32010ed85d"} err="failed to get container status \"07a29596fe7cab12dc24b98af06d7b92d3eab3a60eae4f76a1613e32010ed85d\": rpc error: code = NotFound desc = could not find container \"07a29596fe7cab12dc24b98af06d7b92d3eab3a60eae4f76a1613e32010ed85d\": container with ID starting with 07a29596fe7cab12dc24b98af06d7b92d3eab3a60eae4f76a1613e32010ed85d not found: ID does not exist" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.971404 4884 scope.go:117] "RemoveContainer" containerID="8edd877a6df61f89162901315bb41f140784d89a90bade8a2f60fa6206158765" Dec 10 00:45:58 crc kubenswrapper[4884]: E1210 00:45:58.971629 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8edd877a6df61f89162901315bb41f140784d89a90bade8a2f60fa6206158765\": container with ID starting with 8edd877a6df61f89162901315bb41f140784d89a90bade8a2f60fa6206158765 not found: ID does not exist" containerID="8edd877a6df61f89162901315bb41f140784d89a90bade8a2f60fa6206158765" Dec 10 00:45:58 crc kubenswrapper[4884]: I1210 00:45:58.971647 4884 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"8edd877a6df61f89162901315bb41f140784d89a90bade8a2f60fa6206158765"} err="failed to get container status \"8edd877a6df61f89162901315bb41f140784d89a90bade8a2f60fa6206158765\": rpc error: code = NotFound desc = could not find container \"8edd877a6df61f89162901315bb41f140784d89a90bade8a2f60fa6206158765\": container with ID starting with 8edd877a6df61f89162901315bb41f140784d89a90bade8a2f60fa6206158765 not found: ID does not exist" Dec 10 00:45:59 crc kubenswrapper[4884]: I1210 00:45:59.298038 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69afdd2b-638d-40e7-8618-b662d4770bec" path="/var/lib/kubelet/pods/69afdd2b-638d-40e7-8618-b662d4770bec/volumes" Dec 10 00:46:00 crc kubenswrapper[4884]: I1210 00:46:00.861516 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-c5phr"] Dec 10 00:46:00 crc kubenswrapper[4884]: E1210 00:46:00.861969 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69afdd2b-638d-40e7-8618-b662d4770bec" containerName="extract-content" Dec 10 00:46:00 crc kubenswrapper[4884]: I1210 00:46:00.861982 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="69afdd2b-638d-40e7-8618-b662d4770bec" containerName="extract-content" Dec 10 00:46:00 crc kubenswrapper[4884]: E1210 00:46:00.861992 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69afdd2b-638d-40e7-8618-b662d4770bec" containerName="registry-server" Dec 10 00:46:00 crc kubenswrapper[4884]: I1210 00:46:00.861999 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="69afdd2b-638d-40e7-8618-b662d4770bec" containerName="registry-server" Dec 10 00:46:00 crc kubenswrapper[4884]: E1210 00:46:00.862020 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69afdd2b-638d-40e7-8618-b662d4770bec" containerName="extract-utilities" Dec 10 00:46:00 crc kubenswrapper[4884]: I1210 00:46:00.862028 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="69afdd2b-638d-40e7-8618-b662d4770bec" containerName="extract-utilities" Dec 10 00:46:00 crc kubenswrapper[4884]: I1210 00:46:00.862146 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="69afdd2b-638d-40e7-8618-b662d4770bec" containerName="registry-server" Dec 10 00:46:00 crc kubenswrapper[4884]: I1210 00:46:00.863083 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:00 crc kubenswrapper[4884]: I1210 00:46:00.869583 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c5phr"] Dec 10 00:46:01 crc kubenswrapper[4884]: I1210 00:46:01.021448 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqd59\" (UniqueName: \"kubernetes.io/projected/61cdc048-68cf-48c5-95d6-0515b9eae10d-kube-api-access-qqd59\") pod \"community-operators-c5phr\" (UID: \"61cdc048-68cf-48c5-95d6-0515b9eae10d\") " pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:01 crc kubenswrapper[4884]: I1210 00:46:01.021528 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61cdc048-68cf-48c5-95d6-0515b9eae10d-catalog-content\") pod \"community-operators-c5phr\" (UID: \"61cdc048-68cf-48c5-95d6-0515b9eae10d\") " pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:01 crc kubenswrapper[4884]: I1210 00:46:01.021581 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61cdc048-68cf-48c5-95d6-0515b9eae10d-utilities\") pod \"community-operators-c5phr\" (UID: \"61cdc048-68cf-48c5-95d6-0515b9eae10d\") " pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:01 crc kubenswrapper[4884]: I1210 00:46:01.122336 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61cdc048-68cf-48c5-95d6-0515b9eae10d-catalog-content\") pod \"community-operators-c5phr\" (UID: \"61cdc048-68cf-48c5-95d6-0515b9eae10d\") " pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:01 crc kubenswrapper[4884]: I1210 00:46:01.122475 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61cdc048-68cf-48c5-95d6-0515b9eae10d-utilities\") pod \"community-operators-c5phr\" (UID: \"61cdc048-68cf-48c5-95d6-0515b9eae10d\") " pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:01 crc kubenswrapper[4884]: I1210 00:46:01.122572 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqd59\" (UniqueName: \"kubernetes.io/projected/61cdc048-68cf-48c5-95d6-0515b9eae10d-kube-api-access-qqd59\") pod \"community-operators-c5phr\" (UID: \"61cdc048-68cf-48c5-95d6-0515b9eae10d\") " pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:01 crc kubenswrapper[4884]: I1210 00:46:01.122847 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61cdc048-68cf-48c5-95d6-0515b9eae10d-catalog-content\") pod \"community-operators-c5phr\" (UID: \"61cdc048-68cf-48c5-95d6-0515b9eae10d\") " pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:01 crc kubenswrapper[4884]: I1210 00:46:01.122873 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61cdc048-68cf-48c5-95d6-0515b9eae10d-utilities\") pod \"community-operators-c5phr\" (UID: \"61cdc048-68cf-48c5-95d6-0515b9eae10d\") " pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:01 crc kubenswrapper[4884]: I1210 00:46:01.149512 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qqd59\" (UniqueName: \"kubernetes.io/projected/61cdc048-68cf-48c5-95d6-0515b9eae10d-kube-api-access-qqd59\") pod \"community-operators-c5phr\" (UID: \"61cdc048-68cf-48c5-95d6-0515b9eae10d\") " pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:01 crc kubenswrapper[4884]: I1210 00:46:01.180470 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:02 crc kubenswrapper[4884]: I1210 00:46:02.294560 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-c5phr"] Dec 10 00:46:02 crc kubenswrapper[4884]: W1210 00:46:02.312592 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod61cdc048_68cf_48c5_95d6_0515b9eae10d.slice/crio-19fa10d82453cfdceb6869c17b5165c88a0548580ebb8aa3d632146f1a711a56 WatchSource:0}: Error finding container 19fa10d82453cfdceb6869c17b5165c88a0548580ebb8aa3d632146f1a711a56: Status 404 returned error can't find the container with id 19fa10d82453cfdceb6869c17b5165c88a0548580ebb8aa3d632146f1a711a56 Dec 10 00:46:02 crc kubenswrapper[4884]: I1210 00:46:02.952258 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" event={"ID":"0495f56c-e0ff-4bb7-861c-21754379af3f","Type":"ContainerStarted","Data":"3c75d88454a05a0c2b3883adb9da00a369091ec7a61de06f4761a9c37cf68ca8"} Dec 10 00:46:02 crc kubenswrapper[4884]: I1210 00:46:02.954179 4884 generic.go:334] "Generic (PLEG): container finished" podID="61cdc048-68cf-48c5-95d6-0515b9eae10d" containerID="22e416b8866572c725bf4fa37e80bc7d58dd00ff8acdf73f8e9f2d806b3599ff" exitCode=0 Dec 10 00:46:02 crc kubenswrapper[4884]: I1210 00:46:02.954205 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5phr" event={"ID":"61cdc048-68cf-48c5-95d6-0515b9eae10d","Type":"ContainerDied","Data":"22e416b8866572c725bf4fa37e80bc7d58dd00ff8acdf73f8e9f2d806b3599ff"} Dec 10 00:46:02 crc kubenswrapper[4884]: I1210 00:46:02.954220 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5phr" event={"ID":"61cdc048-68cf-48c5-95d6-0515b9eae10d","Type":"ContainerStarted","Data":"19fa10d82453cfdceb6869c17b5165c88a0548580ebb8aa3d632146f1a711a56"} Dec 10 00:46:06 crc kubenswrapper[4884]: I1210 00:46:06.978669 4884 generic.go:334] "Generic (PLEG): container finished" podID="61cdc048-68cf-48c5-95d6-0515b9eae10d" containerID="6dcd856037c529014a50b05d021f9486e5af58fb5b35dc0809375caceb111a63" exitCode=0 Dec 10 00:46:06 crc kubenswrapper[4884]: I1210 00:46:06.979110 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5phr" event={"ID":"61cdc048-68cf-48c5-95d6-0515b9eae10d","Type":"ContainerDied","Data":"6dcd856037c529014a50b05d021f9486e5af58fb5b35dc0809375caceb111a63"} Dec 10 00:46:08 crc kubenswrapper[4884]: I1210 00:46:08.003215 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5phr" event={"ID":"61cdc048-68cf-48c5-95d6-0515b9eae10d","Type":"ContainerStarted","Data":"e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6"} Dec 10 00:46:08 crc kubenswrapper[4884]: I1210 00:46:08.025873 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-c5phr" 
podStartSLOduration=3.318209581 podStartE2EDuration="8.025855232s" podCreationTimestamp="2025-12-10 00:46:00 +0000 UTC" firstStartedPulling="2025-12-10 00:46:02.956033833 +0000 UTC m=+936.033990950" lastFinishedPulling="2025-12-10 00:46:07.663679484 +0000 UTC m=+940.741636601" observedRunningTime="2025-12-10 00:46:08.023050499 +0000 UTC m=+941.101007616" watchObservedRunningTime="2025-12-10 00:46:08.025855232 +0000 UTC m=+941.103812349" Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.465063 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nqc2n"] Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.466519 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nqc2n" Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.477882 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nqc2n"] Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.506021 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvqg9\" (UniqueName: \"kubernetes.io/projected/d4eb6c3d-656f-4e15-b086-6b2535180118-kube-api-access-zvqg9\") pod \"redhat-marketplace-nqc2n\" (UID: \"d4eb6c3d-656f-4e15-b086-6b2535180118\") " pod="openshift-marketplace/redhat-marketplace-nqc2n" Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.506068 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4eb6c3d-656f-4e15-b086-6b2535180118-utilities\") pod \"redhat-marketplace-nqc2n\" (UID: \"d4eb6c3d-656f-4e15-b086-6b2535180118\") " pod="openshift-marketplace/redhat-marketplace-nqc2n" Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.506090 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4eb6c3d-656f-4e15-b086-6b2535180118-catalog-content\") pod \"redhat-marketplace-nqc2n\" (UID: \"d4eb6c3d-656f-4e15-b086-6b2535180118\") " pod="openshift-marketplace/redhat-marketplace-nqc2n" Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.607674 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvqg9\" (UniqueName: \"kubernetes.io/projected/d4eb6c3d-656f-4e15-b086-6b2535180118-kube-api-access-zvqg9\") pod \"redhat-marketplace-nqc2n\" (UID: \"d4eb6c3d-656f-4e15-b086-6b2535180118\") " pod="openshift-marketplace/redhat-marketplace-nqc2n" Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.607726 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4eb6c3d-656f-4e15-b086-6b2535180118-utilities\") pod \"redhat-marketplace-nqc2n\" (UID: \"d4eb6c3d-656f-4e15-b086-6b2535180118\") " pod="openshift-marketplace/redhat-marketplace-nqc2n" Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.607755 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4eb6c3d-656f-4e15-b086-6b2535180118-catalog-content\") pod \"redhat-marketplace-nqc2n\" (UID: \"d4eb6c3d-656f-4e15-b086-6b2535180118\") " pod="openshift-marketplace/redhat-marketplace-nqc2n" Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.608154 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4eb6c3d-656f-4e15-b086-6b2535180118-catalog-content\") pod \"redhat-marketplace-nqc2n\" (UID: \"d4eb6c3d-656f-4e15-b086-6b2535180118\") " pod="openshift-marketplace/redhat-marketplace-nqc2n" Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.608412 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4eb6c3d-656f-4e15-b086-6b2535180118-utilities\") pod \"redhat-marketplace-nqc2n\" (UID: \"d4eb6c3d-656f-4e15-b086-6b2535180118\") " pod="openshift-marketplace/redhat-marketplace-nqc2n" Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.626558 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvqg9\" (UniqueName: \"kubernetes.io/projected/d4eb6c3d-656f-4e15-b086-6b2535180118-kube-api-access-zvqg9\") pod \"redhat-marketplace-nqc2n\" (UID: \"d4eb6c3d-656f-4e15-b086-6b2535180118\") " pod="openshift-marketplace/redhat-marketplace-nqc2n" Dec 10 00:46:10 crc kubenswrapper[4884]: I1210 00:46:10.828642 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nqc2n" Dec 10 00:46:11 crc kubenswrapper[4884]: I1210 00:46:11.180542 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:11 crc kubenswrapper[4884]: I1210 00:46:11.180603 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:11 crc kubenswrapper[4884]: I1210 00:46:11.263136 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:11 crc kubenswrapper[4884]: I1210 00:46:11.365962 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nqc2n"] Dec 10 00:46:12 crc kubenswrapper[4884]: I1210 00:46:12.030759 4884 generic.go:334] "Generic (PLEG): container finished" podID="d4eb6c3d-656f-4e15-b086-6b2535180118" containerID="c0efffaaae02c475d4b72d6a40bcd353aae0b5b488981cbeb2a51d442c476eea" exitCode=0 Dec 10 00:46:12 crc kubenswrapper[4884]: I1210 00:46:12.030881 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nqc2n" event={"ID":"d4eb6c3d-656f-4e15-b086-6b2535180118","Type":"ContainerDied","Data":"c0efffaaae02c475d4b72d6a40bcd353aae0b5b488981cbeb2a51d442c476eea"} Dec 10 00:46:12 crc kubenswrapper[4884]: I1210 00:46:12.031134 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nqc2n" event={"ID":"d4eb6c3d-656f-4e15-b086-6b2535180118","Type":"ContainerStarted","Data":"9598e9a03522d787658c479f2a78b7f3547cb2f23ff439adf4054b7d89447819"} Dec 10 00:46:12 crc kubenswrapper[4884]: I1210 00:46:12.033747 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" event={"ID":"0495f56c-e0ff-4bb7-861c-21754379af3f","Type":"ContainerStarted","Data":"984b9196f2d63bb90fbf723bafd91b8b4660297dcc5b99c21d12a7335a32eff0"} Dec 10 00:46:12 crc kubenswrapper[4884]: I1210 00:46:12.084177 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" podStartSLOduration=2.231628912 podStartE2EDuration="15.084160162s" podCreationTimestamp="2025-12-10 00:45:57 +0000 UTC" 
firstStartedPulling="2025-12-10 00:45:58.405148096 +0000 UTC m=+931.483105203" lastFinishedPulling="2025-12-10 00:46:11.257679346 +0000 UTC m=+944.335636453" observedRunningTime="2025-12-10 00:46:12.083080934 +0000 UTC m=+945.161038091" watchObservedRunningTime="2025-12-10 00:46:12.084160162 +0000 UTC m=+945.162117279" Dec 10 00:46:12 crc kubenswrapper[4884]: I1210 00:46:12.091022 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-c5phr" Dec 10 00:46:13 crc kubenswrapper[4884]: I1210 00:46:13.040167 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:46:13 crc kubenswrapper[4884]: I1210 00:46:13.041854 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators-redhat/loki-operator-controller-manager-887bbc66c-sjvbc" Dec 10 00:46:14 crc kubenswrapper[4884]: I1210 00:46:14.048344 4884 generic.go:334] "Generic (PLEG): container finished" podID="d4eb6c3d-656f-4e15-b086-6b2535180118" containerID="194bc389ba73d73ff44ad409d37dbcfb430a9800224744259c7cc1ca667483c7" exitCode=0 Dec 10 00:46:14 crc kubenswrapper[4884]: I1210 00:46:14.048481 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nqc2n" event={"ID":"d4eb6c3d-656f-4e15-b086-6b2535180118","Type":"ContainerDied","Data":"194bc389ba73d73ff44ad409d37dbcfb430a9800224744259c7cc1ca667483c7"} Dec 10 00:46:15 crc kubenswrapper[4884]: I1210 00:46:15.057469 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nqc2n" event={"ID":"d4eb6c3d-656f-4e15-b086-6b2535180118","Type":"ContainerStarted","Data":"adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0"} Dec 10 00:46:15 crc kubenswrapper[4884]: I1210 00:46:15.095381 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nqc2n" podStartSLOduration=2.462248174 podStartE2EDuration="5.095364937s" podCreationTimestamp="2025-12-10 00:46:10 +0000 UTC" firstStartedPulling="2025-12-10 00:46:12.03263981 +0000 UTC m=+945.110596927" lastFinishedPulling="2025-12-10 00:46:14.665756553 +0000 UTC m=+947.743713690" observedRunningTime="2025-12-10 00:46:15.093129139 +0000 UTC m=+948.171086266" watchObservedRunningTime="2025-12-10 00:46:15.095364937 +0000 UTC m=+948.173322054" Dec 10 00:46:15 crc kubenswrapper[4884]: I1210 00:46:15.457222 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c5phr"] Dec 10 00:46:15 crc kubenswrapper[4884]: I1210 00:46:15.457504 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-c5phr" podUID="61cdc048-68cf-48c5-95d6-0515b9eae10d" containerName="registry-server" containerID="cri-o://e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6" gracePeriod=2 Dec 10 00:46:15 crc kubenswrapper[4884]: I1210 00:46:15.881234 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-c5phr"
Dec 10 00:46:15 crc kubenswrapper[4884]: I1210 00:46:15.914842 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61cdc048-68cf-48c5-95d6-0515b9eae10d-catalog-content\") pod \"61cdc048-68cf-48c5-95d6-0515b9eae10d\" (UID: \"61cdc048-68cf-48c5-95d6-0515b9eae10d\") "
Dec 10 00:46:15 crc kubenswrapper[4884]: I1210 00:46:15.914887 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61cdc048-68cf-48c5-95d6-0515b9eae10d-utilities\") pod \"61cdc048-68cf-48c5-95d6-0515b9eae10d\" (UID: \"61cdc048-68cf-48c5-95d6-0515b9eae10d\") "
Dec 10 00:46:15 crc kubenswrapper[4884]: I1210 00:46:15.914940 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqd59\" (UniqueName: \"kubernetes.io/projected/61cdc048-68cf-48c5-95d6-0515b9eae10d-kube-api-access-qqd59\") pod \"61cdc048-68cf-48c5-95d6-0515b9eae10d\" (UID: \"61cdc048-68cf-48c5-95d6-0515b9eae10d\") "
Dec 10 00:46:15 crc kubenswrapper[4884]: I1210 00:46:15.916057 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61cdc048-68cf-48c5-95d6-0515b9eae10d-utilities" (OuterVolumeSpecName: "utilities") pod "61cdc048-68cf-48c5-95d6-0515b9eae10d" (UID: "61cdc048-68cf-48c5-95d6-0515b9eae10d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:46:15 crc kubenswrapper[4884]: I1210 00:46:15.931682 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61cdc048-68cf-48c5-95d6-0515b9eae10d-kube-api-access-qqd59" (OuterVolumeSpecName: "kube-api-access-qqd59") pod "61cdc048-68cf-48c5-95d6-0515b9eae10d" (UID: "61cdc048-68cf-48c5-95d6-0515b9eae10d"). InnerVolumeSpecName "kube-api-access-qqd59". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:46:15 crc kubenswrapper[4884]: I1210 00:46:15.997629 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61cdc048-68cf-48c5-95d6-0515b9eae10d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "61cdc048-68cf-48c5-95d6-0515b9eae10d" (UID: "61cdc048-68cf-48c5-95d6-0515b9eae10d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.016167 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61cdc048-68cf-48c5-95d6-0515b9eae10d-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.016208 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61cdc048-68cf-48c5-95d6-0515b9eae10d-utilities\") on node \"crc\" DevicePath \"\""
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.016218 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqd59\" (UniqueName: \"kubernetes.io/projected/61cdc048-68cf-48c5-95d6-0515b9eae10d-kube-api-access-qqd59\") on node \"crc\" DevicePath \"\""
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.064622 4884 generic.go:334] "Generic (PLEG): container finished" podID="61cdc048-68cf-48c5-95d6-0515b9eae10d" containerID="e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6" exitCode=0
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.064673 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-c5phr"
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.064665 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5phr" event={"ID":"61cdc048-68cf-48c5-95d6-0515b9eae10d","Type":"ContainerDied","Data":"e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6"}
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.064789 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-c5phr" event={"ID":"61cdc048-68cf-48c5-95d6-0515b9eae10d","Type":"ContainerDied","Data":"19fa10d82453cfdceb6869c17b5165c88a0548580ebb8aa3d632146f1a711a56"}
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.064809 4884 scope.go:117] "RemoveContainer" containerID="e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6"
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.084710 4884 scope.go:117] "RemoveContainer" containerID="6dcd856037c529014a50b05d021f9486e5af58fb5b35dc0809375caceb111a63"
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.101030 4884 scope.go:117] "RemoveContainer" containerID="22e416b8866572c725bf4fa37e80bc7d58dd00ff8acdf73f8e9f2d806b3599ff"
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.106920 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-c5phr"]
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.111524 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-c5phr"]
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.119861 4884 scope.go:117] "RemoveContainer" containerID="e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6"
Dec 10 00:46:16 crc kubenswrapper[4884]: E1210 00:46:16.122064 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6\": container with ID starting with e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6 not found: ID does not exist" containerID="e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6"
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.122193 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6"} err="failed to get container status \"e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6\": rpc error: code = NotFound desc = could not find container \"e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6\": container with ID starting with e77006c40e3dfe8172dce5ec98a4c59296c598afac663467707c7785855ea0a6 not found: ID does not exist"
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.122303 4884 scope.go:117] "RemoveContainer" containerID="6dcd856037c529014a50b05d021f9486e5af58fb5b35dc0809375caceb111a63"
Dec 10 00:46:16 crc kubenswrapper[4884]: E1210 00:46:16.122752 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6dcd856037c529014a50b05d021f9486e5af58fb5b35dc0809375caceb111a63\": container with ID starting with 6dcd856037c529014a50b05d021f9486e5af58fb5b35dc0809375caceb111a63 not found: ID does not exist" containerID="6dcd856037c529014a50b05d021f9486e5af58fb5b35dc0809375caceb111a63"
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.122786 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6dcd856037c529014a50b05d021f9486e5af58fb5b35dc0809375caceb111a63"} err="failed to get container status \"6dcd856037c529014a50b05d021f9486e5af58fb5b35dc0809375caceb111a63\": rpc error: code = NotFound desc = could not find container \"6dcd856037c529014a50b05d021f9486e5af58fb5b35dc0809375caceb111a63\": container with ID starting with 6dcd856037c529014a50b05d021f9486e5af58fb5b35dc0809375caceb111a63 not found: ID does not exist"
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.122805 4884 scope.go:117] "RemoveContainer" containerID="22e416b8866572c725bf4fa37e80bc7d58dd00ff8acdf73f8e9f2d806b3599ff"
Dec 10 00:46:16 crc kubenswrapper[4884]: E1210 00:46:16.123168 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22e416b8866572c725bf4fa37e80bc7d58dd00ff8acdf73f8e9f2d806b3599ff\": container with ID starting with 22e416b8866572c725bf4fa37e80bc7d58dd00ff8acdf73f8e9f2d806b3599ff not found: ID does not exist" containerID="22e416b8866572c725bf4fa37e80bc7d58dd00ff8acdf73f8e9f2d806b3599ff"
Dec 10 00:46:16 crc kubenswrapper[4884]: I1210 00:46:16.123264 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22e416b8866572c725bf4fa37e80bc7d58dd00ff8acdf73f8e9f2d806b3599ff"} err="failed to get container status \"22e416b8866572c725bf4fa37e80bc7d58dd00ff8acdf73f8e9f2d806b3599ff\": rpc error: code = NotFound desc = could not find container \"22e416b8866572c725bf4fa37e80bc7d58dd00ff8acdf73f8e9f2d806b3599ff\": container with ID starting with 22e416b8866572c725bf4fa37e80bc7d58dd00ff8acdf73f8e9f2d806b3599ff not found: ID does not exist"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.294822 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61cdc048-68cf-48c5-95d6-0515b9eae10d" path="/var/lib/kubelet/pods/61cdc048-68cf-48c5-95d6-0515b9eae10d/volumes"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.648043 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["minio-dev/minio"]
Dec 10 00:46:17 crc kubenswrapper[4884]: E1210 00:46:17.648399 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61cdc048-68cf-48c5-95d6-0515b9eae10d" containerName="extract-content"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.648417 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="61cdc048-68cf-48c5-95d6-0515b9eae10d" containerName="extract-content"
Dec 10 00:46:17 crc kubenswrapper[4884]: E1210 00:46:17.648445 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61cdc048-68cf-48c5-95d6-0515b9eae10d" containerName="extract-utilities"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.648453 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="61cdc048-68cf-48c5-95d6-0515b9eae10d" containerName="extract-utilities"
Dec 10 00:46:17 crc kubenswrapper[4884]: E1210 00:46:17.648464 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61cdc048-68cf-48c5-95d6-0515b9eae10d" containerName="registry-server"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.648474 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="61cdc048-68cf-48c5-95d6-0515b9eae10d" containerName="registry-server"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.648600 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="61cdc048-68cf-48c5-95d6-0515b9eae10d" containerName="registry-server"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.649049 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.652969 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"openshift-service-ca.crt"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.653136 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"minio-dev"/"kube-root-ca.crt"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.653254 4884 reflector.go:368] Caches populated for *v1.Secret from object-"minio-dev"/"default-dockercfg-djw6r"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.657425 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.738782 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb9mw\" (UniqueName: \"kubernetes.io/projected/1b6248bf-8cb7-4032-931d-e44af3216991-kube-api-access-nb9mw\") pod \"minio\" (UID: \"1b6248bf-8cb7-4032-931d-e44af3216991\") " pod="minio-dev/minio"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.738901 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-6424726e-e333-41c9-b2ca-fe771ea1e841\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6424726e-e333-41c9-b2ca-fe771ea1e841\") pod \"minio\" (UID: \"1b6248bf-8cb7-4032-931d-e44af3216991\") " pod="minio-dev/minio"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.840509 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-6424726e-e333-41c9-b2ca-fe771ea1e841\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6424726e-e333-41c9-b2ca-fe771ea1e841\") pod \"minio\" (UID: \"1b6248bf-8cb7-4032-931d-e44af3216991\") " pod="minio-dev/minio"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.840569 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb9mw\" (UniqueName: \"kubernetes.io/projected/1b6248bf-8cb7-4032-931d-e44af3216991-kube-api-access-nb9mw\") pod \"minio\" (UID: \"1b6248bf-8cb7-4032-931d-e44af3216991\") " pod="minio-dev/minio"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.844204 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.844248 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-6424726e-e333-41c9-b2ca-fe771ea1e841\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6424726e-e333-41c9-b2ca-fe771ea1e841\") pod \"minio\" (UID: \"1b6248bf-8cb7-4032-931d-e44af3216991\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/584068737e625a89f898f718588e83b3f3d481eb0abb3890259821669c2ffc43/globalmount\"" pod="minio-dev/minio"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.860167 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb9mw\" (UniqueName: \"kubernetes.io/projected/1b6248bf-8cb7-4032-931d-e44af3216991-kube-api-access-nb9mw\") pod \"minio\" (UID: \"1b6248bf-8cb7-4032-931d-e44af3216991\") " pod="minio-dev/minio"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.864649 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-6424726e-e333-41c9-b2ca-fe771ea1e841\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-6424726e-e333-41c9-b2ca-fe771ea1e841\") pod \"minio\" (UID: \"1b6248bf-8cb7-4032-931d-e44af3216991\") " pod="minio-dev/minio"
Dec 10 00:46:17 crc kubenswrapper[4884]: I1210 00:46:17.966114 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="minio-dev/minio"
Dec 10 00:46:18 crc kubenswrapper[4884]: I1210 00:46:18.097907 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 10 00:46:18 crc kubenswrapper[4884]: I1210 00:46:18.097957 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 10 00:46:18 crc kubenswrapper[4884]: I1210 00:46:18.176932 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["minio-dev/minio"]
Dec 10 00:46:18 crc kubenswrapper[4884]: W1210 00:46:18.189398 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b6248bf_8cb7_4032_931d_e44af3216991.slice/crio-f7b158ff094209ab6f2a57e8898d96d9fd5ea3f639eb26523a3f85ea9b6f56c5 WatchSource:0}: Error finding container f7b158ff094209ab6f2a57e8898d96d9fd5ea3f639eb26523a3f85ea9b6f56c5: Status 404 returned error can't find the container with id f7b158ff094209ab6f2a57e8898d96d9fd5ea3f639eb26523a3f85ea9b6f56c5
Dec 10 00:46:19 crc kubenswrapper[4884]: I1210 00:46:19.091611 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"1b6248bf-8cb7-4032-931d-e44af3216991","Type":"ContainerStarted","Data":"f7b158ff094209ab6f2a57e8898d96d9fd5ea3f639eb26523a3f85ea9b6f56c5"}
Dec 10 00:46:20 crc kubenswrapper[4884]: I1210 00:46:20.829760 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nqc2n"
Dec 10 00:46:20 crc kubenswrapper[4884]: I1210 00:46:20.830117 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nqc2n"
Dec 10 00:46:20 crc kubenswrapper[4884]: I1210 00:46:20.875676 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nqc2n"
Dec 10 00:46:21 crc kubenswrapper[4884]: I1210 00:46:21.179110 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nqc2n"
Dec 10 00:46:23 crc kubenswrapper[4884]: I1210 00:46:23.123939 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="minio-dev/minio" event={"ID":"1b6248bf-8cb7-4032-931d-e44af3216991","Type":"ContainerStarted","Data":"e39264ea47d9e6392bd1c24db655a89066613ef58316afea8e7dabacbeb29bbb"}
Dec 10 00:46:23 crc kubenswrapper[4884]: I1210 00:46:23.151322 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="minio-dev/minio" podStartSLOduration=5.270010229 podStartE2EDuration="9.151291807s" podCreationTimestamp="2025-12-10 00:46:14 +0000 UTC" firstStartedPulling="2025-12-10 00:46:18.188179919 +0000 UTC m=+951.266137036" lastFinishedPulling="2025-12-10 00:46:22.069461457 +0000 UTC m=+955.147418614" observedRunningTime="2025-12-10 00:46:23.143917855 +0000 UTC m=+956.221875022" watchObservedRunningTime="2025-12-10 00:46:23.151291807 +0000 UTC m=+956.229248964"
Dec 10 00:46:23 crc kubenswrapper[4884]: I1210 00:46:23.265018 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nqc2n"]
Dec 10 00:46:23 crc kubenswrapper[4884]: I1210 00:46:23.265384 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nqc2n" podUID="d4eb6c3d-656f-4e15-b086-6b2535180118" containerName="registry-server" containerID="cri-o://adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0" gracePeriod=2
Dec 10 00:46:23 crc kubenswrapper[4884]: I1210 00:46:23.753854 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nqc2n"
Dec 10 00:46:23 crc kubenswrapper[4884]: I1210 00:46:23.945985 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4eb6c3d-656f-4e15-b086-6b2535180118-utilities\") pod \"d4eb6c3d-656f-4e15-b086-6b2535180118\" (UID: \"d4eb6c3d-656f-4e15-b086-6b2535180118\") "
Dec 10 00:46:23 crc kubenswrapper[4884]: I1210 00:46:23.946086 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvqg9\" (UniqueName: \"kubernetes.io/projected/d4eb6c3d-656f-4e15-b086-6b2535180118-kube-api-access-zvqg9\") pod \"d4eb6c3d-656f-4e15-b086-6b2535180118\" (UID: \"d4eb6c3d-656f-4e15-b086-6b2535180118\") "
Dec 10 00:46:23 crc kubenswrapper[4884]: I1210 00:46:23.946133 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4eb6c3d-656f-4e15-b086-6b2535180118-catalog-content\") pod \"d4eb6c3d-656f-4e15-b086-6b2535180118\" (UID: \"d4eb6c3d-656f-4e15-b086-6b2535180118\") "
Dec 10 00:46:23 crc kubenswrapper[4884]: I1210 00:46:23.950104 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4eb6c3d-656f-4e15-b086-6b2535180118-utilities" (OuterVolumeSpecName: "utilities") pod "d4eb6c3d-656f-4e15-b086-6b2535180118" (UID: "d4eb6c3d-656f-4e15-b086-6b2535180118"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:46:23 crc kubenswrapper[4884]: I1210 00:46:23.959685 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4eb6c3d-656f-4e15-b086-6b2535180118-kube-api-access-zvqg9" (OuterVolumeSpecName: "kube-api-access-zvqg9") pod "d4eb6c3d-656f-4e15-b086-6b2535180118" (UID: "d4eb6c3d-656f-4e15-b086-6b2535180118"). InnerVolumeSpecName "kube-api-access-zvqg9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:46:23 crc kubenswrapper[4884]: I1210 00:46:23.964298 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4eb6c3d-656f-4e15-b086-6b2535180118-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d4eb6c3d-656f-4e15-b086-6b2535180118" (UID: "d4eb6c3d-656f-4e15-b086-6b2535180118"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.047733 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvqg9\" (UniqueName: \"kubernetes.io/projected/d4eb6c3d-656f-4e15-b086-6b2535180118-kube-api-access-zvqg9\") on node \"crc\" DevicePath \"\""
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.047768 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4eb6c3d-656f-4e15-b086-6b2535180118-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.047778 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4eb6c3d-656f-4e15-b086-6b2535180118-utilities\") on node \"crc\" DevicePath \"\""
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.138305 4884 generic.go:334] "Generic (PLEG): container finished" podID="d4eb6c3d-656f-4e15-b086-6b2535180118" containerID="adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0" exitCode=0
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.138353 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nqc2n" event={"ID":"d4eb6c3d-656f-4e15-b086-6b2535180118","Type":"ContainerDied","Data":"adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0"}
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.138385 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nqc2n"
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.138406 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nqc2n" event={"ID":"d4eb6c3d-656f-4e15-b086-6b2535180118","Type":"ContainerDied","Data":"9598e9a03522d787658c479f2a78b7f3547cb2f23ff439adf4054b7d89447819"}
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.138445 4884 scope.go:117] "RemoveContainer" containerID="adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0"
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.154834 4884 scope.go:117] "RemoveContainer" containerID="194bc389ba73d73ff44ad409d37dbcfb430a9800224744259c7cc1ca667483c7"
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.165102 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nqc2n"]
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.169859 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nqc2n"]
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.182798 4884 scope.go:117] "RemoveContainer" containerID="c0efffaaae02c475d4b72d6a40bcd353aae0b5b488981cbeb2a51d442c476eea"
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.195421 4884 scope.go:117] "RemoveContainer" containerID="adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0"
Dec 10 00:46:24 crc kubenswrapper[4884]: E1210 00:46:24.196039 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0\": container with ID starting with adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0 not found: ID does not exist" containerID="adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0"
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.196123 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0"} err="failed to get container status \"adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0\": rpc error: code = NotFound desc = could not find container \"adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0\": container with ID starting with adf8dbbc87a78279856607d1641d0684bfc27b198522e3f92f305972853e74e0 not found: ID does not exist"
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.196164 4884 scope.go:117] "RemoveContainer" containerID="194bc389ba73d73ff44ad409d37dbcfb430a9800224744259c7cc1ca667483c7"
Dec 10 00:46:24 crc kubenswrapper[4884]: E1210 00:46:24.196501 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"194bc389ba73d73ff44ad409d37dbcfb430a9800224744259c7cc1ca667483c7\": container with ID starting with 194bc389ba73d73ff44ad409d37dbcfb430a9800224744259c7cc1ca667483c7 not found: ID does not exist" containerID="194bc389ba73d73ff44ad409d37dbcfb430a9800224744259c7cc1ca667483c7"
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.196538 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"194bc389ba73d73ff44ad409d37dbcfb430a9800224744259c7cc1ca667483c7"} err="failed to get container status \"194bc389ba73d73ff44ad409d37dbcfb430a9800224744259c7cc1ca667483c7\": rpc error: code = NotFound desc = could not find container \"194bc389ba73d73ff44ad409d37dbcfb430a9800224744259c7cc1ca667483c7\": container with ID starting with 194bc389ba73d73ff44ad409d37dbcfb430a9800224744259c7cc1ca667483c7 not found: ID does not exist"
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.196564 4884 scope.go:117] "RemoveContainer" containerID="c0efffaaae02c475d4b72d6a40bcd353aae0b5b488981cbeb2a51d442c476eea"
Dec 10 00:46:24 crc kubenswrapper[4884]: E1210 00:46:24.196902 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0efffaaae02c475d4b72d6a40bcd353aae0b5b488981cbeb2a51d442c476eea\": container with ID starting with c0efffaaae02c475d4b72d6a40bcd353aae0b5b488981cbeb2a51d442c476eea not found: ID does not exist" containerID="c0efffaaae02c475d4b72d6a40bcd353aae0b5b488981cbeb2a51d442c476eea"
Dec 10 00:46:24 crc kubenswrapper[4884]: I1210 00:46:24.196927 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0efffaaae02c475d4b72d6a40bcd353aae0b5b488981cbeb2a51d442c476eea"} err="failed to get container status \"c0efffaaae02c475d4b72d6a40bcd353aae0b5b488981cbeb2a51d442c476eea\": rpc error: code = NotFound desc = could not find container \"c0efffaaae02c475d4b72d6a40bcd353aae0b5b488981cbeb2a51d442c476eea\": container with ID starting with c0efffaaae02c475d4b72d6a40bcd353aae0b5b488981cbeb2a51d442c476eea not found: ID does not exist"
Dec 10 00:46:25 crc kubenswrapper[4884]: I1210 00:46:25.306449 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4eb6c3d-656f-4e15-b086-6b2535180118" path="/var/lib/kubelet/pods/d4eb6c3d-656f-4e15-b086-6b2535180118/volumes"
Dec 10 00:46:26 crc kubenswrapper[4884]: I1210 00:46:26.954046 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"]
Dec 10 00:46:26 crc kubenswrapper[4884]: E1210 00:46:26.954497 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4eb6c3d-656f-4e15-b086-6b2535180118" containerName="extract-utilities"
Dec 10 00:46:26 crc kubenswrapper[4884]: I1210 00:46:26.954513 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4eb6c3d-656f-4e15-b086-6b2535180118" containerName="extract-utilities"
Dec 10 00:46:26 crc kubenswrapper[4884]: E1210 00:46:26.954530 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4eb6c3d-656f-4e15-b086-6b2535180118" containerName="registry-server"
Dec 10 00:46:26 crc kubenswrapper[4884]: I1210 00:46:26.954538 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4eb6c3d-656f-4e15-b086-6b2535180118" containerName="registry-server"
Dec 10 00:46:26 crc kubenswrapper[4884]: E1210 00:46:26.954549 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4eb6c3d-656f-4e15-b086-6b2535180118" containerName="extract-content"
Dec 10 00:46:26 crc kubenswrapper[4884]: I1210 00:46:26.954558 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4eb6c3d-656f-4e15-b086-6b2535180118" containerName="extract-content"
Dec 10 00:46:26 crc kubenswrapper[4884]: I1210 00:46:26.954693 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4eb6c3d-656f-4e15-b086-6b2535180118" containerName="registry-server"
Dec 10 00:46:26 crc kubenswrapper[4884]: I1210 00:46:26.955192 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:26 crc kubenswrapper[4884]: I1210 00:46:26.958456 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http"
Dec 10 00:46:26 crc kubenswrapper[4884]: I1210 00:46:26.958694 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle"
Dec 10 00:46:26 crc kubenswrapper[4884]: I1210 00:46:26.958868 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-6vfls"
Dec 10 00:46:26 crc kubenswrapper[4884]: I1210 00:46:26.959044 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc"
Dec 10 00:46:26 crc kubenswrapper[4884]: I1210 00:46:26.959306 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config"
Dec 10 00:46:26 crc kubenswrapper[4884]: I1210 00:46:26.966011 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"]
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.083624 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b58ab17f-8141-4a6c-ba11-5409fae236c6-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.083666 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/b58ab17f-8141-4a6c-ba11-5409fae236c6-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.083696 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/b58ab17f-8141-4a6c-ba11-5409fae236c6-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.083726 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtpvz\" (UniqueName: \"kubernetes.io/projected/b58ab17f-8141-4a6c-ba11-5409fae236c6-kube-api-access-rtpvz\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.083768 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b58ab17f-8141-4a6c-ba11-5409fae236c6-config\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.122149 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"]
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.123362 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.124986 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.125252 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.125379 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.136156 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"]
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.184601 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"]
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.185259 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.186489 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b58ab17f-8141-4a6c-ba11-5409fae236c6-config\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.186605 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b58ab17f-8141-4a6c-ba11-5409fae236c6-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.186639 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/b58ab17f-8141-4a6c-ba11-5409fae236c6-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.186678 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/b58ab17f-8141-4a6c-ba11-5409fae236c6-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.186723 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtpvz\" (UniqueName: \"kubernetes.io/projected/b58ab17f-8141-4a6c-ba11-5409fae236c6-kube-api-access-rtpvz\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.190158 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-ca-bundle"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.190272 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-http"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.190332 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-http"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.190377 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-distributor-grpc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.190346 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-query-frontend-grpc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.190660 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-config"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.200924 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b58ab17f-8141-4a6c-ba11-5409fae236c6-logging-loki-ca-bundle\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.201624 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b58ab17f-8141-4a6c-ba11-5409fae236c6-config\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.203844 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"]
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.205268 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-http\" (UniqueName: \"kubernetes.io/secret/b58ab17f-8141-4a6c-ba11-5409fae236c6-logging-loki-distributor-http\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.206944 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-distributor-grpc\" (UniqueName: \"kubernetes.io/secret/b58ab17f-8141-4a6c-ba11-5409fae236c6-logging-loki-distributor-grpc\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.218249 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtpvz\" (UniqueName: \"kubernetes.io/projected/b58ab17f-8141-4a6c-ba11-5409fae236c6-kube-api-access-rtpvz\") pod \"logging-loki-distributor-76cc67bf56-rhlrk\" (UID: \"b58ab17f-8141-4a6c-ba11-5409fae236c6\") " pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.276084 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-dockercfg-6vfls"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.285277 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.289100 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/c21130da-105e-48b9-a89b-a189cb685b5d-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.289156 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/b4520f02-2919-4b32-96f8-8e23a7c13e6c-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.289190 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/b4520f02-2919-4b32-96f8-8e23a7c13e6c-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.289215 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgdb7\" (UniqueName: \"kubernetes.io/projected/c21130da-105e-48b9-a89b-a189cb685b5d-kube-api-access-jgdb7\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.289239 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4520f02-2919-4b32-96f8-8e23a7c13e6c-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.289269 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hkbr\" (UniqueName: \"kubernetes.io/projected/b4520f02-2919-4b32-96f8-8e23a7c13e6c-kube-api-access-9hkbr\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.289296 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/c21130da-105e-48b9-a89b-a189cb685b5d-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.289334 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c21130da-105e-48b9-a89b-a189cb685b5d-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.289371 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c21130da-105e-48b9-a89b-a189cb685b5d-config\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.289410 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4520f02-2919-4b32-96f8-8e23a7c13e6c-config\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.289469 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/c21130da-105e-48b9-a89b-a189cb685b5d-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.303138 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"]
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.304099 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.307794 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-http"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.308235 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.308372 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-dockercfg-m5lxm"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.308493 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-gateway-client-http"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.308599 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-gateway-98d4874d8-qzbtl"]
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.308627 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway-ca-bundle"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.308703 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"logging-loki-gateway"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.309755 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.321301 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"]
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.326153 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-98d4874d8-qzbtl"]
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394153 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-tls-secret\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394215 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/b4520f02-2919-4b32-96f8-8e23a7c13e6c-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394244 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/c21130da-105e-48b9-a89b-a189cb685b5d-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394277 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-tenants\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394295 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sljg\" (UniqueName: \"kubernetes.io/projected/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-kube-api-access-7sljg\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394312 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/b4520f02-2919-4b32-96f8-8e23a7c13e6c-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394328 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgdb7\" (UniqueName: \"kubernetes.io/projected/c21130da-105e-48b9-a89b-a189cb685b5d-kube-api-access-jgdb7\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394347 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-lokistack-gateway\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394365 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4520f02-2919-4b32-96f8-8e23a7c13e6c-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394382 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394400 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hkbr\" (UniqueName: \"kubernetes.io/projected/b4520f02-2919-4b32-96f8-8e23a7c13e6c-kube-api-access-9hkbr\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394418 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/c21130da-105e-48b9-a89b-a189cb685b5d-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394472 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c21130da-105e-48b9-a89b-a189cb685b5d-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394498 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c21130da-105e-48b9-a89b-a189cb685b5d-config\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394521 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4520f02-2919-4b32-96f8-8e23a7c13e6c-config\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394549 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394572 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/c21130da-105e-48b9-a89b-a189cb685b5d-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394600 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-logging-loki-ca-bundle\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.394616 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-rbac\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.398773 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b4520f02-2919-4b32-96f8-8e23a7c13e6c-logging-loki-ca-bundle\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.398854 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-http\" (UniqueName: \"kubernetes.io/secret/b4520f02-2919-4b32-96f8-8e23a7c13e6c-logging-loki-query-frontend-http\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.399384 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c21130da-105e-48b9-a89b-a189cb685b5d-config\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.399910 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4520f02-2919-4b32-96f8-8e23a7c13e6c-config\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.400297 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-http"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.400477 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c21130da-105e-48b9-a89b-a189cb685b5d-logging-loki-ca-bundle\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.400987 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-query-frontend-grpc\" (UniqueName: \"kubernetes.io/secret/b4520f02-2919-4b32-96f8-8e23a7c13e6c-logging-loki-query-frontend-grpc\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.402486 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-s3"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.402620 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-querier-grpc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.413077 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-http\" (UniqueName: \"kubernetes.io/secret/c21130da-105e-48b9-a89b-a189cb685b5d-logging-loki-querier-http\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.415497 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-querier-grpc\" (UniqueName: \"kubernetes.io/secret/c21130da-105e-48b9-a89b-a189cb685b5d-logging-loki-querier-grpc\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.415667 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/c21130da-105e-48b9-a89b-a189cb685b5d-logging-loki-s3\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.419416 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgdb7\" (UniqueName: \"kubernetes.io/projected/c21130da-105e-48b9-a89b-a189cb685b5d-kube-api-access-jgdb7\") pod \"logging-loki-querier-5895d59bb8-2qrrl\" (UID: \"c21130da-105e-48b9-a89b-a189cb685b5d\") " pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.426961 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hkbr\" (UniqueName: \"kubernetes.io/projected/b4520f02-2919-4b32-96f8-8e23a7c13e6c-kube-api-access-9hkbr\") pod \"logging-loki-query-frontend-84558f7c9f-ggmrc\" (UID: \"b4520f02-2919-4b32-96f8-8e23a7c13e6c\") " pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.449141 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.504569 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.504945 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-tls-secret\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505005 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-rbac\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505033 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505059 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-tenants\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505082 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sljg\" (UniqueName: \"kubernetes.io/projected/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-kube-api-access-7sljg\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505105 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-lokistack-gateway\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505127 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505176 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xh7rr\" (UniqueName: \"kubernetes.io/projected/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-kube-api-access-xh7rr\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505200 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-tls-secret\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505231 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505255 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-logging-loki-ca-bundle\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505286 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505312 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-lokistack-gateway\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505335 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-logging-loki-ca-bundle\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505352 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-rbac\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.505371 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-tenants\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.506350 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-logging-loki-ca-bundle\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.506372 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.506370 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-lokistack-gateway\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.507051 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-rbac\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.509150 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-tls-secret\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.510337 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-tenants\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.511664 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.534176 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sljg\" (UniqueName: \"kubernetes.io/projected/165d85c1-40f2-4a21-af8c-ae510a8fe6a1-kube-api-access-7sljg\") pod \"logging-loki-gateway-98d4874d8-j2jj4\" (UID: \"165d85c1-40f2-4a21-af8c-ae510a8fe6a1\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"
Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.562255 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk"]
Dec 10 00:46:27
crc kubenswrapper[4884]: W1210 00:46:27.601212 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb58ab17f_8141_4a6c_ba11_5409fae236c6.slice/crio-c467870acd89e07296277d125f03e4360e6b2be8a2b56ae5f5264fb383080e79 WatchSource:0}: Error finding container c467870acd89e07296277d125f03e4360e6b2be8a2b56ae5f5264fb383080e79: Status 404 returned error can't find the container with id c467870acd89e07296277d125f03e4360e6b2be8a2b56ae5f5264fb383080e79 Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.607264 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xh7rr\" (UniqueName: \"kubernetes.io/projected/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-kube-api-access-xh7rr\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.607470 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-tls-secret\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.607636 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.607701 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-logging-loki-ca-bundle\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.607774 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-lokistack-gateway\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.607817 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-tenants\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.607851 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-rbac\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.607883 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.608915 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lokistack-gateway\" (UniqueName: \"kubernetes.io/configmap/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-lokistack-gateway\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.609232 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-logging-loki-ca-bundle\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.609524 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-logging-loki-gateway-ca-bundle\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.609972 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rbac\" (UniqueName: \"kubernetes.io/configmap/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-rbac\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.611355 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-secret\" (UniqueName: \"kubernetes.io/secret/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-tls-secret\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.613421 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tenants\" (UniqueName: \"kubernetes.io/secret/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-tenants\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.618032 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-gateway-client-http\" (UniqueName: \"kubernetes.io/secret/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-logging-loki-gateway-client-http\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: \"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.631366 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xh7rr\" (UniqueName: \"kubernetes.io/projected/a971900d-5e53-42f1-ac1a-3ec4a99b3d32-kube-api-access-xh7rr\") pod \"logging-loki-gateway-98d4874d8-qzbtl\" (UID: 
\"a971900d-5e53-42f1-ac1a-3ec4a99b3d32\") " pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.674769 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4" Dec 10 00:46:27 crc kubenswrapper[4884]: I1210 00:46:27.698693 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.032829 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc"] Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.044356 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-querier-5895d59bb8-2qrrl"] Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.054330 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-98d4874d8-j2jj4"] Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.144388 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-gateway-98d4874d8-qzbtl"] Dec 10 00:46:28 crc kubenswrapper[4884]: W1210 00:46:28.144976 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda971900d_5e53_42f1_ac1a_3ec4a99b3d32.slice/crio-0cd4b3adc027714d6e7b105ce8172f9e96fa91e6102478c0e40fa06778db7c44 WatchSource:0}: Error finding container 0cd4b3adc027714d6e7b105ce8172f9e96fa91e6102478c0e40fa06778db7c44: Status 404 returned error can't find the container with id 0cd4b3adc027714d6e7b105ce8172f9e96fa91e6102478c0e40fa06778db7c44 Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.154321 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.155115 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.158298 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-grpc" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.158460 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-ingester-http" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.160023 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.167790 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.168603 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.170835 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-http" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.170899 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-compactor-grpc" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.171740 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4" event={"ID":"165d85c1-40f2-4a21-af8c-ae510a8fe6a1","Type":"ContainerStarted","Data":"2c4d4b47533d28e5060a01ce7d57fdc90798c10e52c2964ccf477e2d9611b103"} Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.172833 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc" event={"ID":"b4520f02-2919-4b32-96f8-8e23a7c13e6c","Type":"ContainerStarted","Data":"84e312bdae44feabfcd96b43b4a4a0153096f18f82d1346f25883c65d5088c5b"} Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.174407 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl" event={"ID":"c21130da-105e-48b9-a89b-a189cb685b5d","Type":"ContainerStarted","Data":"13c6d869203544648f74ccf8aa65475ba67df4e81ecbceb70064c3bec7f654d8"} Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.181504 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk" event={"ID":"b58ab17f-8141-4a6c-ba11-5409fae236c6","Type":"ContainerStarted","Data":"c467870acd89e07296277d125f03e4360e6b2be8a2b56ae5f5264fb383080e79"} Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.183680 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" event={"ID":"a971900d-5e53-42f1-ac1a-3ec4a99b3d32","Type":"ContainerStarted","Data":"0cd4b3adc027714d6e7b105ce8172f9e96fa91e6102478c0e40fa06778db7c44"} Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.198801 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.228383 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.229389 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.231272 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-grpc" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.231414 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"logging-loki-index-gateway-http" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.236144 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325452 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/68b75b37-1530-4b53-95c9-2c4073c1120b-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325507 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/68b75b37-1530-4b53-95c9-2c4073c1120b-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325539 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/e5d84713-f630-43e2-9666-86b465112548-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325571 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/68b75b37-1530-4b53-95c9-2c4073c1120b-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325598 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e5d84713-f630-43e2-9666-86b465112548-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325628 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/795df1c2-a13d-4196-9246-0ef72ca9d141-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325655 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97zbp\" (UniqueName: \"kubernetes.io/projected/795df1c2-a13d-4196-9246-0ef72ca9d141-kube-api-access-97zbp\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " 
pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325684 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b0702c5b-b3be-4581-841c-c279799fe961\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b0702c5b-b3be-4581-841c-c279799fe961\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325709 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5nkk\" (UniqueName: \"kubernetes.io/projected/e5d84713-f630-43e2-9666-86b465112548-kube-api-access-q5nkk\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325734 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5d84713-f630-43e2-9666-86b465112548-config\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325763 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/795df1c2-a13d-4196-9246-0ef72ca9d141-config\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325796 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/68b75b37-1530-4b53-95c9-2c4073c1120b-config\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325821 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/e5d84713-f630-43e2-9666-86b465112548-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325876 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/68b75b37-1530-4b53-95c9-2c4073c1120b-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325902 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-679948ba-47e8-42a6-90c3-4da959f687c7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-679948ba-47e8-42a6-90c3-4da959f687c7\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325930 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/795df1c2-a13d-4196-9246-0ef72ca9d141-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325963 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-34ff955a-e143-40b6-ba9b-f857c92b7225\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-34ff955a-e143-40b6-ba9b-f857c92b7225\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.325990 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/e5d84713-f630-43e2-9666-86b465112548-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.326013 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/795df1c2-a13d-4196-9246-0ef72ca9d141-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.326422 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-298abe1d-b2bd-4b30-9cf7-a365cdb9b362\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-298abe1d-b2bd-4b30-9cf7-a365cdb9b362\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.326480 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/795df1c2-a13d-4196-9246-0ef72ca9d141-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.326514 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhbmv\" (UniqueName: \"kubernetes.io/projected/68b75b37-1530-4b53-95c9-2c4073c1120b-kube-api-access-dhbmv\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.427707 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/68b75b37-1530-4b53-95c9-2c4073c1120b-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.427755 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-679948ba-47e8-42a6-90c3-4da959f687c7\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-679948ba-47e8-42a6-90c3-4da959f687c7\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.427787 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/795df1c2-a13d-4196-9246-0ef72ca9d141-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.427808 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-34ff955a-e143-40b6-ba9b-f857c92b7225\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-34ff955a-e143-40b6-ba9b-f857c92b7225\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.427828 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/e5d84713-f630-43e2-9666-86b465112548-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.427845 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/795df1c2-a13d-4196-9246-0ef72ca9d141-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.427861 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-298abe1d-b2bd-4b30-9cf7-a365cdb9b362\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-298abe1d-b2bd-4b30-9cf7-a365cdb9b362\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.427883 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/795df1c2-a13d-4196-9246-0ef72ca9d141-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.427915 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhbmv\" (UniqueName: \"kubernetes.io/projected/68b75b37-1530-4b53-95c9-2c4073c1120b-kube-api-access-dhbmv\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.427943 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/68b75b37-1530-4b53-95c9-2c4073c1120b-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 
00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.427963 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/68b75b37-1530-4b53-95c9-2c4073c1120b-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.427985 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/e5d84713-f630-43e2-9666-86b465112548-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.428007 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/68b75b37-1530-4b53-95c9-2c4073c1120b-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.428031 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e5d84713-f630-43e2-9666-86b465112548-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.428059 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97zbp\" (UniqueName: \"kubernetes.io/projected/795df1c2-a13d-4196-9246-0ef72ca9d141-kube-api-access-97zbp\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.428081 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/795df1c2-a13d-4196-9246-0ef72ca9d141-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.428103 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b0702c5b-b3be-4581-841c-c279799fe961\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b0702c5b-b3be-4581-841c-c279799fe961\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.428126 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5nkk\" (UniqueName: \"kubernetes.io/projected/e5d84713-f630-43e2-9666-86b465112548-kube-api-access-q5nkk\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.428153 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5d84713-f630-43e2-9666-86b465112548-config\") pod \"logging-loki-compactor-0\" (UID: 
\"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.428182 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/795df1c2-a13d-4196-9246-0ef72ca9d141-config\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.428214 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/68b75b37-1530-4b53-95c9-2c4073c1120b-config\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.428232 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/e5d84713-f630-43e2-9666-86b465112548-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.428637 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/68b75b37-1530-4b53-95c9-2c4073c1120b-logging-loki-ca-bundle\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.430171 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5d84713-f630-43e2-9666-86b465112548-config\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.430373 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/795df1c2-a13d-4196-9246-0ef72ca9d141-config\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.430386 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/795df1c2-a13d-4196-9246-0ef72ca9d141-logging-loki-ca-bundle\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.430732 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/68b75b37-1530-4b53-95c9-2c4073c1120b-config\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.431204 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/e5d84713-f630-43e2-9666-86b465112548-logging-loki-ca-bundle\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" 
Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.434373 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-grpc\" (UniqueName: \"kubernetes.io/secret/e5d84713-f630-43e2-9666-86b465112548-logging-loki-compactor-grpc\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.435124 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-http\" (UniqueName: \"kubernetes.io/secret/68b75b37-1530-4b53-95c9-2c4073c1120b-logging-loki-ingester-http\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.436040 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.436070 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b0702c5b-b3be-4581-841c-c279799fe961\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b0702c5b-b3be-4581-841c-c279799fe961\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0b49dd49c5a69783e045d4e249b364f6b07ef3ffb5f8f1b60e1a2e7d2c70a22c/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.439777 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-grpc\" (UniqueName: \"kubernetes.io/secret/795df1c2-a13d-4196-9246-0ef72ca9d141-logging-loki-index-gateway-grpc\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.440291 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/795df1c2-a13d-4196-9246-0ef72ca9d141-logging-loki-s3\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.440751 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-index-gateway-http\" (UniqueName: \"kubernetes.io/secret/795df1c2-a13d-4196-9246-0ef72ca9d141-logging-loki-index-gateway-http\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.441702 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-compactor-http\" (UniqueName: \"kubernetes.io/secret/e5d84713-f630-43e2-9666-86b465112548-logging-loki-compactor-http\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.443348 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/68b75b37-1530-4b53-95c9-2c4073c1120b-logging-loki-s3\") pod \"logging-loki-ingester-0\" (UID: 
\"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.445344 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.445382 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-298abe1d-b2bd-4b30-9cf7-a365cdb9b362\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-298abe1d-b2bd-4b30-9cf7-a365cdb9b362\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e79ab5672df111d0ec8b3034ef34c8c175b68b3d876fb25da0f3546b56dcdb5d/globalmount\"" pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.445672 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.445710 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-34ff955a-e143-40b6-ba9b-f857c92b7225\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-34ff955a-e143-40b6-ba9b-f857c92b7225\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ef1fa2b67d6fb47dca654df64739d09c9dadcdb66da368937b319472d3126e20/globalmount\"" pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.445950 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-ingester-grpc\" (UniqueName: \"kubernetes.io/secret/68b75b37-1530-4b53-95c9-2c4073c1120b-logging-loki-ingester-grpc\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.448204 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logging-loki-s3\" (UniqueName: \"kubernetes.io/secret/e5d84713-f630-43e2-9666-86b465112548-logging-loki-s3\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.450570 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97zbp\" (UniqueName: \"kubernetes.io/projected/795df1c2-a13d-4196-9246-0ef72ca9d141-kube-api-access-97zbp\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.454704 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5nkk\" (UniqueName: \"kubernetes.io/projected/e5d84713-f630-43e2-9666-86b465112548-kube-api-access-q5nkk\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.474637 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-34ff955a-e143-40b6-ba9b-f857c92b7225\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-34ff955a-e143-40b6-ba9b-f857c92b7225\") pod \"logging-loki-index-gateway-0\" (UID: \"795df1c2-a13d-4196-9246-0ef72ca9d141\") " pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.478449 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b0702c5b-b3be-4581-841c-c279799fe961\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b0702c5b-b3be-4581-841c-c279799fe961\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.482392 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-298abe1d-b2bd-4b30-9cf7-a365cdb9b362\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-298abe1d-b2bd-4b30-9cf7-a365cdb9b362\") pod \"logging-loki-compactor-0\" (UID: \"e5d84713-f630-43e2-9666-86b465112548\") " pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.499986 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.555019 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.755106 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-compactor-0"] Dec 10 00:46:28 crc kubenswrapper[4884]: I1210 00:46:28.817021 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-index-gateway-0"] Dec 10 00:46:28 crc kubenswrapper[4884]: W1210 00:46:28.820501 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod795df1c2_a13d_4196_9246_0ef72ca9d141.slice/crio-9e65d0279284b56d052613a00822463e1571e9e4ccef6fe876a0af808450e2a7 WatchSource:0}: Error finding container 9e65d0279284b56d052613a00822463e1571e9e4ccef6fe876a0af808450e2a7: Status 404 returned error can't find the container with id 9e65d0279284b56d052613a00822463e1571e9e4ccef6fe876a0af808450e2a7 Dec 10 00:46:29 crc kubenswrapper[4884]: I1210 00:46:29.192166 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"795df1c2-a13d-4196-9246-0ef72ca9d141","Type":"ContainerStarted","Data":"9e65d0279284b56d052613a00822463e1571e9e4ccef6fe876a0af808450e2a7"} Dec 10 00:46:29 crc kubenswrapper[4884]: I1210 00:46:29.193518 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"e5d84713-f630-43e2-9666-86b465112548","Type":"ContainerStarted","Data":"6aab039456f53215bc11a74facc8f6dd84f159ff663208be45f9a225aad40049"} Dec 10 00:46:30 crc kubenswrapper[4884]: I1210 00:46:30.174245 4884 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 10 00:46:30 crc kubenswrapper[4884]: I1210 00:46:30.174779 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-679948ba-47e8-42a6-90c3-4da959f687c7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-679948ba-47e8-42a6-90c3-4da959f687c7\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e51e2a8bf7e83c34b4e2283bb678e2f2f7301e8706ef8720e943343293965b6a/globalmount\"" pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:30 crc kubenswrapper[4884]: I1210 00:46:30.220635 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-679948ba-47e8-42a6-90c3-4da959f687c7\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-679948ba-47e8-42a6-90c3-4da959f687c7\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:30 crc kubenswrapper[4884]: I1210 00:46:30.261138 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhbmv\" (UniqueName: \"kubernetes.io/projected/68b75b37-1530-4b53-95c9-2c4073c1120b-kube-api-access-dhbmv\") pod \"logging-loki-ingester-0\" (UID: \"68b75b37-1530-4b53-95c9-2c4073c1120b\") " pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:30 crc kubenswrapper[4884]: I1210 00:46:30.283257 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:30 crc kubenswrapper[4884]: I1210 00:46:30.603320 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/logging-loki-ingester-0"] Dec 10 00:46:31 crc kubenswrapper[4884]: I1210 00:46:31.208045 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"68b75b37-1530-4b53-95c9-2c4073c1120b","Type":"ContainerStarted","Data":"0ff9009af6c3dd8ce6e04b17e86d15fb1d5eaacab5be3293b409f4cb5ac5e1da"} Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.233992 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk" event={"ID":"b58ab17f-8141-4a6c-ba11-5409fae236c6","Type":"ContainerStarted","Data":"bfc6e18a90c2eba96bf42159abf21cbb7de19cbd008b655ae9021b14faabf1ab"} Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.234579 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk" Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.235857 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-index-gateway-0" event={"ID":"795df1c2-a13d-4196-9246-0ef72ca9d141","Type":"ContainerStarted","Data":"74ebee1da3390d152020f452a52af824a5d28beedfa45d2edfa5fdb9aba8ad67"} Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.236457 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.237709 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-ingester-0" event={"ID":"68b75b37-1530-4b53-95c9-2c4073c1120b","Type":"ContainerStarted","Data":"220800713aef9371758793265e0baa0c42fb5db282372e39c58b7ba168431362"} Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.254729 4884 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.258412 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk" podStartSLOduration=2.185524728 podStartE2EDuration="7.258393386s" podCreationTimestamp="2025-12-10 00:46:26 +0000 UTC" firstStartedPulling="2025-12-10 00:46:27.617220869 +0000 UTC m=+960.695177986" lastFinishedPulling="2025-12-10 00:46:32.690089527 +0000 UTC m=+965.768046644" observedRunningTime="2025-12-10 00:46:33.255394538 +0000 UTC m=+966.333351655" watchObservedRunningTime="2025-12-10 00:46:33.258393386 +0000 UTC m=+966.336350503" Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.272700 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4" event={"ID":"165d85c1-40f2-4a21-af8c-ae510a8fe6a1","Type":"ContainerStarted","Data":"271255d082e24d54935d853c651dd4f21a934a84d9fda4f27e4d294dc129dd93"} Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.277996 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" event={"ID":"a971900d-5e53-42f1-ac1a-3ec4a99b3d32","Type":"ContainerStarted","Data":"89b01dee24125ef8254c82d631b9196e9e0085ba0acd10e560ce6787bea19cc4"} Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.279574 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc" event={"ID":"b4520f02-2919-4b32-96f8-8e23a7c13e6c","Type":"ContainerStarted","Data":"56b4106d78fe80243850afba3f238899f0399265f16c2e8e23f6f6c862468f21"} Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.280268 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc" Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.282709 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl" event={"ID":"c21130da-105e-48b9-a89b-a189cb685b5d","Type":"ContainerStarted","Data":"a6ea3805d52a9b6d04372387e243036095933459397a7c14a43279a50119c865"} Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.282851 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl" Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.284561 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-compactor-0" event={"ID":"e5d84713-f630-43e2-9666-86b465112548","Type":"ContainerStarted","Data":"00bb152ea94eb365675885f6ac57a90a29c18495f308de26da3e339f64923abf"} Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.284876 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.312620 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-ingester-0" podStartSLOduration=4.216687513 podStartE2EDuration="6.312605058s" podCreationTimestamp="2025-12-10 00:46:27 +0000 UTC" firstStartedPulling="2025-12-10 00:46:30.627704356 +0000 UTC m=+963.705661473" lastFinishedPulling="2025-12-10 00:46:32.723621871 +0000 UTC m=+965.801579018" observedRunningTime="2025-12-10 00:46:33.302444573 +0000 UTC m=+966.380401700" watchObservedRunningTime="2025-12-10 00:46:33.312605058 +0000 UTC 
m=+966.390562175" Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.327601 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-index-gateway-0" podStartSLOduration=2.467344208 podStartE2EDuration="6.327581258s" podCreationTimestamp="2025-12-10 00:46:27 +0000 UTC" firstStartedPulling="2025-12-10 00:46:28.823558554 +0000 UTC m=+961.901515671" lastFinishedPulling="2025-12-10 00:46:32.683795604 +0000 UTC m=+965.761752721" observedRunningTime="2025-12-10 00:46:33.323380819 +0000 UTC m=+966.401337946" watchObservedRunningTime="2025-12-10 00:46:33.327581258 +0000 UTC m=+966.405538375" Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.344217 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc" podStartSLOduration=1.699247383 podStartE2EDuration="6.344202201s" podCreationTimestamp="2025-12-10 00:46:27 +0000 UTC" firstStartedPulling="2025-12-10 00:46:28.045387256 +0000 UTC m=+961.123344373" lastFinishedPulling="2025-12-10 00:46:32.690342064 +0000 UTC m=+965.768299191" observedRunningTime="2025-12-10 00:46:33.343506604 +0000 UTC m=+966.421463731" watchObservedRunningTime="2025-12-10 00:46:33.344202201 +0000 UTC m=+966.422159318" Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.362366 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl" podStartSLOduration=1.694553392 podStartE2EDuration="6.362349735s" podCreationTimestamp="2025-12-10 00:46:27 +0000 UTC" firstStartedPulling="2025-12-10 00:46:28.056970598 +0000 UTC m=+961.134927715" lastFinishedPulling="2025-12-10 00:46:32.724766941 +0000 UTC m=+965.802724058" observedRunningTime="2025-12-10 00:46:33.359824559 +0000 UTC m=+966.437781676" watchObservedRunningTime="2025-12-10 00:46:33.362349735 +0000 UTC m=+966.440306852" Dec 10 00:46:33 crc kubenswrapper[4884]: I1210 00:46:33.380776 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-compactor-0" podStartSLOduration=2.487647038 podStartE2EDuration="6.380761154s" podCreationTimestamp="2025-12-10 00:46:27 +0000 UTC" firstStartedPulling="2025-12-10 00:46:28.764541556 +0000 UTC m=+961.842498673" lastFinishedPulling="2025-12-10 00:46:32.657655672 +0000 UTC m=+965.735612789" observedRunningTime="2025-12-10 00:46:33.376456853 +0000 UTC m=+966.454413980" watchObservedRunningTime="2025-12-10 00:46:33.380761154 +0000 UTC m=+966.458718271" Dec 10 00:46:36 crc kubenswrapper[4884]: I1210 00:46:36.309059 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" event={"ID":"a971900d-5e53-42f1-ac1a-3ec4a99b3d32","Type":"ContainerStarted","Data":"ac33a5ce7dc06f8696c6fb916a4df00617f01df7c87b259bf77bc28df5956bb7"} Dec 10 00:46:36 crc kubenswrapper[4884]: I1210 00:46:36.309780 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:36 crc kubenswrapper[4884]: I1210 00:46:36.311251 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4" event={"ID":"165d85c1-40f2-4a21-af8c-ae510a8fe6a1","Type":"ContainerStarted","Data":"e985dc3f279df6ee041e2693d943b6f82648ab57ede91b48c3e9c59191221845"} Dec 10 00:46:36 crc kubenswrapper[4884]: I1210 00:46:36.311489 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4" Dec 10 00:46:36 crc kubenswrapper[4884]: I1210 00:46:36.311716 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4" Dec 10 00:46:36 crc kubenswrapper[4884]: I1210 00:46:36.319945 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:36 crc kubenswrapper[4884]: I1210 00:46:36.322308 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4" Dec 10 00:46:36 crc kubenswrapper[4884]: I1210 00:46:36.327131 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4" Dec 10 00:46:36 crc kubenswrapper[4884]: I1210 00:46:36.335226 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" podStartSLOduration=2.3044032420000002 podStartE2EDuration="9.33519969s" podCreationTimestamp="2025-12-10 00:46:27 +0000 UTC" firstStartedPulling="2025-12-10 00:46:28.152298392 +0000 UTC m=+961.230255509" lastFinishedPulling="2025-12-10 00:46:35.18309483 +0000 UTC m=+968.261051957" observedRunningTime="2025-12-10 00:46:36.331768661 +0000 UTC m=+969.409725808" watchObservedRunningTime="2025-12-10 00:46:36.33519969 +0000 UTC m=+969.413156847" Dec 10 00:46:36 crc kubenswrapper[4884]: I1210 00:46:36.374070 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/logging-loki-gateway-98d4874d8-j2jj4" podStartSLOduration=2.264469343 podStartE2EDuration="9.374042893s" podCreationTimestamp="2025-12-10 00:46:27 +0000 UTC" firstStartedPulling="2025-12-10 00:46:28.068304673 +0000 UTC m=+961.146261800" lastFinishedPulling="2025-12-10 00:46:35.177878233 +0000 UTC m=+968.255835350" observedRunningTime="2025-12-10 00:46:36.363290472 +0000 UTC m=+969.441247639" watchObservedRunningTime="2025-12-10 00:46:36.374042893 +0000 UTC m=+969.452000050" Dec 10 00:46:37 crc kubenswrapper[4884]: I1210 00:46:37.319878 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:37 crc kubenswrapper[4884]: I1210 00:46:37.337990 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-gateway-98d4874d8-qzbtl" Dec 10 00:46:47 crc kubenswrapper[4884]: I1210 00:46:47.298958 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-distributor-76cc67bf56-rhlrk" Dec 10 00:46:47 crc kubenswrapper[4884]: I1210 00:46:47.462050 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-querier-5895d59bb8-2qrrl" Dec 10 00:46:47 crc kubenswrapper[4884]: I1210 00:46:47.514567 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-query-frontend-84558f7c9f-ggmrc" Dec 10 00:46:48 crc kubenswrapper[4884]: I1210 00:46:48.098426 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:46:48 crc kubenswrapper[4884]: I1210 00:46:48.098529 4884 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:46:48 crc kubenswrapper[4884]: I1210 00:46:48.098569 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:46:48 crc kubenswrapper[4884]: I1210 00:46:48.099067 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fb549c86290e5eaaf8d0b787f8b183ae212b7154cabf782092b91c5f95b4ef4c"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 00:46:48 crc kubenswrapper[4884]: I1210 00:46:48.099119 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://fb549c86290e5eaaf8d0b787f8b183ae212b7154cabf782092b91c5f95b4ef4c" gracePeriod=600 Dec 10 00:46:48 crc kubenswrapper[4884]: I1210 00:46:48.393315 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="fb549c86290e5eaaf8d0b787f8b183ae212b7154cabf782092b91c5f95b4ef4c" exitCode=0 Dec 10 00:46:48 crc kubenswrapper[4884]: I1210 00:46:48.393363 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"fb549c86290e5eaaf8d0b787f8b183ae212b7154cabf782092b91c5f95b4ef4c"} Dec 10 00:46:48 crc kubenswrapper[4884]: I1210 00:46:48.393406 4884 scope.go:117] "RemoveContainer" containerID="1c531d5171ce3be4c53ba6e890eb2f889d87559fb1212cb5a42702e23d9e930b" Dec 10 00:46:48 crc kubenswrapper[4884]: I1210 00:46:48.507001 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-compactor-0" Dec 10 00:46:48 crc kubenswrapper[4884]: I1210 00:46:48.563401 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-index-gateway-0" Dec 10 00:46:49 crc kubenswrapper[4884]: I1210 00:46:49.403082 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"85d53d23856c70f8c66a5944018254f82db2773d6bfbe9b53696ef024e9cffda"} Dec 10 00:46:50 crc kubenswrapper[4884]: I1210 00:46:50.290673 4884 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Dec 10 00:46:50 crc kubenswrapper[4884]: I1210 00:46:50.291136 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="68b75b37-1530-4b53-95c9-2c4073c1120b" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 10 00:47:00 crc kubenswrapper[4884]: I1210 00:47:00.292044 4884 
patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: this instance owns no tokens Dec 10 00:47:00 crc kubenswrapper[4884]: I1210 00:47:00.292703 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="68b75b37-1530-4b53-95c9-2c4073c1120b" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 10 00:47:10 crc kubenswrapper[4884]: I1210 00:47:10.288858 4884 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Dec 10 00:47:10 crc kubenswrapper[4884]: I1210 00:47:10.289580 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="68b75b37-1530-4b53-95c9-2c4073c1120b" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 10 00:47:20 crc kubenswrapper[4884]: I1210 00:47:20.287769 4884 patch_prober.go:28] interesting pod/logging-loki-ingester-0 container/loki-ingester namespace/openshift-logging: Readiness probe status=failure output="HTTP probe failed with statuscode: 503" start-of-body=Ingester not ready: waiting for 15s after being ready Dec 10 00:47:20 crc kubenswrapper[4884]: I1210 00:47:20.289623 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-logging/logging-loki-ingester-0" podUID="68b75b37-1530-4b53-95c9-2c4073c1120b" containerName="loki-ingester" probeResult="failure" output="HTTP probe failed with statuscode: 503" Dec 10 00:47:30 crc kubenswrapper[4884]: I1210 00:47:30.290221 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-logging/logging-loki-ingester-0" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.418795 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-vsmqx"] Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.420153 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: W1210 00:47:48.422316 4884 reflector.go:561] object-"openshift-logging"/"collector-metrics": failed to list *v1.Secret: secrets "collector-metrics" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-logging": no relationship found between node 'crc' and this object Dec 10 00:47:48 crc kubenswrapper[4884]: E1210 00:47:48.422401 4884 reflector.go:158] "Unhandled Error" err="object-\"openshift-logging\"/\"collector-metrics\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"collector-metrics\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-logging\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.422877 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Dec 10 00:47:48 crc kubenswrapper[4884]: W1210 00:47:48.425990 4884 reflector.go:561] object-"openshift-logging"/"collector-token": failed to list *v1.Secret: secrets "collector-token" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-logging": no relationship found between node 'crc' and this object Dec 10 00:47:48 crc kubenswrapper[4884]: E1210 00:47:48.426031 4884 reflector.go:158] "Unhandled Error" err="object-\"openshift-logging\"/\"collector-token\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"collector-token\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-logging\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.426458 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-tr4gf" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.434496 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.440009 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.470084 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-vsmqx"] Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.498095 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/10166585-14e0-4d4a-8208-a7381bff2089-datadir\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.498144 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/10166585-14e0-4d4a-8208-a7381bff2089-tmp\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.498193 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: 
\"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-entrypoint\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.498286 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-config-openshift-service-cacrt\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.498342 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-metrics\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.498363 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/10166585-14e0-4d4a-8208-a7381bff2089-sa-token\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.498456 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-config\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.498509 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ck9sz\" (UniqueName: \"kubernetes.io/projected/10166585-14e0-4d4a-8208-a7381bff2089-kube-api-access-ck9sz\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.498545 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-trusted-ca\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.498585 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-syslog-receiver\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.498612 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-token\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.505661 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-vsmqx"] Dec 10 00:47:48 crc kubenswrapper[4884]: E1210 00:47:48.506139 4884 
pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[collector-syslog-receiver collector-token config config-openshift-service-cacrt datadir entrypoint kube-api-access-ck9sz metrics sa-token tmp trusted-ca], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-logging/collector-vsmqx" podUID="10166585-14e0-4d4a-8208-a7381bff2089" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.600238 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-token\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.600338 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/10166585-14e0-4d4a-8208-a7381bff2089-tmp\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.600367 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/10166585-14e0-4d4a-8208-a7381bff2089-datadir\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.600456 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-entrypoint\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.600483 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-config-openshift-service-cacrt\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.600507 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-metrics\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.600528 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/10166585-14e0-4d4a-8208-a7381bff2089-sa-token\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.600534 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/10166585-14e0-4d4a-8208-a7381bff2089-datadir\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.600565 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-config\") pod 
\"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.600653 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ck9sz\" (UniqueName: \"kubernetes.io/projected/10166585-14e0-4d4a-8208-a7381bff2089-kube-api-access-ck9sz\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.600693 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-trusted-ca\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.600744 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-syslog-receiver\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: E1210 00:47:48.600911 4884 secret.go:188] Couldn't get secret openshift-logging/collector-syslog-receiver: secret "collector-syslog-receiver" not found Dec 10 00:47:48 crc kubenswrapper[4884]: E1210 00:47:48.600962 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-syslog-receiver podName:10166585-14e0-4d4a-8208-a7381bff2089 nodeName:}" failed. No retries permitted until 2025-12-10 00:47:49.100944502 +0000 UTC m=+1042.178901619 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "collector-syslog-receiver" (UniqueName: "kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-syslog-receiver") pod "collector-vsmqx" (UID: "10166585-14e0-4d4a-8208-a7381bff2089") : secret "collector-syslog-receiver" not found Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.601368 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-config\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.601452 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-config-openshift-service-cacrt\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.601714 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-trusted-ca\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.601856 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-entrypoint\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.609108 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/10166585-14e0-4d4a-8208-a7381bff2089-tmp\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.619004 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ck9sz\" (UniqueName: \"kubernetes.io/projected/10166585-14e0-4d4a-8208-a7381bff2089-kube-api-access-ck9sz\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.629997 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/10166585-14e0-4d4a-8208-a7381bff2089-sa-token\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.860632 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.869411 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-vsmqx" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.905047 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-config\") pod \"10166585-14e0-4d4a-8208-a7381bff2089\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.905137 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/10166585-14e0-4d4a-8208-a7381bff2089-sa-token\") pod \"10166585-14e0-4d4a-8208-a7381bff2089\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.905172 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ck9sz\" (UniqueName: \"kubernetes.io/projected/10166585-14e0-4d4a-8208-a7381bff2089-kube-api-access-ck9sz\") pod \"10166585-14e0-4d4a-8208-a7381bff2089\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.905199 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/10166585-14e0-4d4a-8208-a7381bff2089-datadir\") pod \"10166585-14e0-4d4a-8208-a7381bff2089\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.905237 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/10166585-14e0-4d4a-8208-a7381bff2089-tmp\") pod \"10166585-14e0-4d4a-8208-a7381bff2089\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.905309 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-config-openshift-service-cacrt\") pod \"10166585-14e0-4d4a-8208-a7381bff2089\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.905365 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-entrypoint\") pod \"10166585-14e0-4d4a-8208-a7381bff2089\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.905397 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-trusted-ca\") pod \"10166585-14e0-4d4a-8208-a7381bff2089\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.905403 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/10166585-14e0-4d4a-8208-a7381bff2089-datadir" (OuterVolumeSpecName: "datadir") pod "10166585-14e0-4d4a-8208-a7381bff2089" (UID: "10166585-14e0-4d4a-8208-a7381bff2089"). InnerVolumeSpecName "datadir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.905746 4884 reconciler_common.go:293] "Volume detached for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/10166585-14e0-4d4a-8208-a7381bff2089-datadir\") on node \"crc\" DevicePath \"\"" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.905886 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-config-openshift-service-cacrt" (OuterVolumeSpecName: "config-openshift-service-cacrt") pod "10166585-14e0-4d4a-8208-a7381bff2089" (UID: "10166585-14e0-4d4a-8208-a7381bff2089"). InnerVolumeSpecName "config-openshift-service-cacrt". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.906123 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-entrypoint" (OuterVolumeSpecName: "entrypoint") pod "10166585-14e0-4d4a-8208-a7381bff2089" (UID: "10166585-14e0-4d4a-8208-a7381bff2089"). InnerVolumeSpecName "entrypoint". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.906033 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-config" (OuterVolumeSpecName: "config") pod "10166585-14e0-4d4a-8208-a7381bff2089" (UID: "10166585-14e0-4d4a-8208-a7381bff2089"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.906595 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "10166585-14e0-4d4a-8208-a7381bff2089" (UID: "10166585-14e0-4d4a-8208-a7381bff2089"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.910940 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10166585-14e0-4d4a-8208-a7381bff2089-sa-token" (OuterVolumeSpecName: "sa-token") pod "10166585-14e0-4d4a-8208-a7381bff2089" (UID: "10166585-14e0-4d4a-8208-a7381bff2089"). InnerVolumeSpecName "sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.911263 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10166585-14e0-4d4a-8208-a7381bff2089-kube-api-access-ck9sz" (OuterVolumeSpecName: "kube-api-access-ck9sz") pod "10166585-14e0-4d4a-8208-a7381bff2089" (UID: "10166585-14e0-4d4a-8208-a7381bff2089"). InnerVolumeSpecName "kube-api-access-ck9sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:47:48 crc kubenswrapper[4884]: I1210 00:47:48.911709 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10166585-14e0-4d4a-8208-a7381bff2089-tmp" (OuterVolumeSpecName: "tmp") pod "10166585-14e0-4d4a-8208-a7381bff2089" (UID: "10166585-14e0-4d4a-8208-a7381bff2089"). InnerVolumeSpecName "tmp". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.007648 4884 reconciler_common.go:293] "Volume detached for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-entrypoint\") on node \"crc\" DevicePath \"\"" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.007683 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.007693 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.007703 4884 reconciler_common.go:293] "Volume detached for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/10166585-14e0-4d4a-8208-a7381bff2089-sa-token\") on node \"crc\" DevicePath \"\"" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.007711 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ck9sz\" (UniqueName: \"kubernetes.io/projected/10166585-14e0-4d4a-8208-a7381bff2089-kube-api-access-ck9sz\") on node \"crc\" DevicePath \"\"" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.007723 4884 reconciler_common.go:293] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/10166585-14e0-4d4a-8208-a7381bff2089-tmp\") on node \"crc\" DevicePath \"\"" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.007732 4884 reconciler_common.go:293] "Volume detached for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/10166585-14e0-4d4a-8208-a7381bff2089-config-openshift-service-cacrt\") on node \"crc\" DevicePath \"\"" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.108836 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-syslog-receiver\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.113071 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-syslog-receiver\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.210275 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-syslog-receiver\") pod \"10166585-14e0-4d4a-8208-a7381bff2089\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.215524 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-syslog-receiver" (OuterVolumeSpecName: "collector-syslog-receiver") pod "10166585-14e0-4d4a-8208-a7381bff2089" (UID: "10166585-14e0-4d4a-8208-a7381bff2089"). InnerVolumeSpecName "collector-syslog-receiver". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.312507 4884 reconciler_common.go:293] "Volume detached for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-syslog-receiver\") on node \"crc\" DevicePath \"\"" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.467818 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.473789 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-token\") pod \"collector-vsmqx\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " pod="openshift-logging/collector-vsmqx" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.515231 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-token\") pod \"10166585-14e0-4d4a-8208-a7381bff2089\" (UID: \"10166585-14e0-4d4a-8208-a7381bff2089\") " Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.521022 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-token" (OuterVolumeSpecName: "collector-token") pod "10166585-14e0-4d4a-8208-a7381bff2089" (UID: "10166585-14e0-4d4a-8208-a7381bff2089"). InnerVolumeSpecName "collector-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:47:49 crc kubenswrapper[4884]: E1210 00:47:49.601143 4884 secret.go:188] Couldn't get secret openshift-logging/collector-metrics: failed to sync secret cache: timed out waiting for the condition Dec 10 00:47:49 crc kubenswrapper[4884]: E1210 00:47:49.601252 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-metrics podName:10166585-14e0-4d4a-8208-a7381bff2089 nodeName:}" failed. No retries permitted until 2025-12-10 00:47:50.101230535 +0000 UTC m=+1043.179187672 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics" (UniqueName: "kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-metrics") pod "collector-vsmqx" (UID: "10166585-14e0-4d4a-8208-a7381bff2089") : failed to sync secret cache: timed out waiting for the condition Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.617074 4884 reconciler_common.go:293] "Volume detached for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-collector-token\") on node \"crc\" DevicePath \"\"" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.871487 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-logging/collector-vsmqx" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.916010 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-logging/collector-vsmqx"] Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.928329 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-logging/collector-vsmqx"] Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.937091 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-logging/collector-drtt8"] Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.938770 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-drtt8" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.940945 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-syslog-receiver" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.943280 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-dockercfg-tr4gf" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.945547 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-token" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.945664 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-logging"/"collector-metrics" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.946297 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-config" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.954552 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-logging"/"collector-trustbundle" Dec 10 00:47:49 crc kubenswrapper[4884]: I1210 00:47:49.958849 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-drtt8"] Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.124388 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf56411e-549f-4625-87fb-59932e4e58ed-config\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.124528 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/bf56411e-549f-4625-87fb-59932e4e58ed-tmp\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.124561 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/bf56411e-549f-4625-87fb-59932e4e58ed-entrypoint\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.124606 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/bf56411e-549f-4625-87fb-59932e4e58ed-collector-syslog-receiver\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.124757 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/bf56411e-549f-4625-87fb-59932e4e58ed-metrics\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.124835 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf56411e-549f-4625-87fb-59932e4e58ed-trusted-ca\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" 
Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.124874 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/bf56411e-549f-4625-87fb-59932e4e58ed-collector-token\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.124912 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/bf56411e-549f-4625-87fb-59932e4e58ed-config-openshift-service-cacrt\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.125010 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86p28\" (UniqueName: \"kubernetes.io/projected/bf56411e-549f-4625-87fb-59932e4e58ed-kube-api-access-86p28\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.125061 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/bf56411e-549f-4625-87fb-59932e4e58ed-datadir\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.125163 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/bf56411e-549f-4625-87fb-59932e4e58ed-sa-token\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.125293 4884 reconciler_common.go:293] "Volume detached for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/10166585-14e0-4d4a-8208-a7381bff2089-metrics\") on node \"crc\" DevicePath \"\"" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.226000 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/bf56411e-549f-4625-87fb-59932e4e58ed-config-openshift-service-cacrt\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.226063 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86p28\" (UniqueName: \"kubernetes.io/projected/bf56411e-549f-4625-87fb-59932e4e58ed-kube-api-access-86p28\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.226097 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/bf56411e-549f-4625-87fb-59932e4e58ed-datadir\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.226127 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sa-token\" (UniqueName: 
\"kubernetes.io/projected/bf56411e-549f-4625-87fb-59932e4e58ed-sa-token\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.226158 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf56411e-549f-4625-87fb-59932e4e58ed-config\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.226175 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/bf56411e-549f-4625-87fb-59932e4e58ed-tmp\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.226188 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/bf56411e-549f-4625-87fb-59932e4e58ed-entrypoint\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.226193 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"datadir\" (UniqueName: \"kubernetes.io/host-path/bf56411e-549f-4625-87fb-59932e4e58ed-datadir\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.226212 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/bf56411e-549f-4625-87fb-59932e4e58ed-collector-syslog-receiver\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.226302 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/bf56411e-549f-4625-87fb-59932e4e58ed-metrics\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.226353 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf56411e-549f-4625-87fb-59932e4e58ed-trusted-ca\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.226387 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/bf56411e-549f-4625-87fb-59932e4e58ed-collector-token\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.227272 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-openshift-service-cacrt\" (UniqueName: \"kubernetes.io/configmap/bf56411e-549f-4625-87fb-59932e4e58ed-config-openshift-service-cacrt\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.227608 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"entrypoint\" (UniqueName: \"kubernetes.io/configmap/bf56411e-549f-4625-87fb-59932e4e58ed-entrypoint\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.227716 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf56411e-549f-4625-87fb-59932e4e58ed-config\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.228408 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf56411e-549f-4625-87fb-59932e4e58ed-trusted-ca\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.229203 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/bf56411e-549f-4625-87fb-59932e4e58ed-tmp\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.229339 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-syslog-receiver\" (UniqueName: \"kubernetes.io/secret/bf56411e-549f-4625-87fb-59932e4e58ed-collector-syslog-receiver\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.229676 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"collector-token\" (UniqueName: \"kubernetes.io/secret/bf56411e-549f-4625-87fb-59932e4e58ed-collector-token\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.231366 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/secret/bf56411e-549f-4625-87fb-59932e4e58ed-metrics\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.245746 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86p28\" (UniqueName: \"kubernetes.io/projected/bf56411e-549f-4625-87fb-59932e4e58ed-kube-api-access-86p28\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.247038 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sa-token\" (UniqueName: \"kubernetes.io/projected/bf56411e-549f-4625-87fb-59932e4e58ed-sa-token\") pod \"collector-drtt8\" (UID: \"bf56411e-549f-4625-87fb-59932e4e58ed\") " pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.264214 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-logging/collector-drtt8" Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.519449 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-logging/collector-drtt8"] Dec 10 00:47:50 crc kubenswrapper[4884]: I1210 00:47:50.881379 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-drtt8" event={"ID":"bf56411e-549f-4625-87fb-59932e4e58ed","Type":"ContainerStarted","Data":"6b458f41ea45920b6028934bcdc0dbb57486e12cb34c299d49d6d51612727ef7"} Dec 10 00:47:51 crc kubenswrapper[4884]: I1210 00:47:51.296110 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10166585-14e0-4d4a-8208-a7381bff2089" path="/var/lib/kubelet/pods/10166585-14e0-4d4a-8208-a7381bff2089/volumes" Dec 10 00:48:11 crc kubenswrapper[4884]: I1210 00:48:11.029483 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-logging/collector-drtt8" event={"ID":"bf56411e-549f-4625-87fb-59932e4e58ed","Type":"ContainerStarted","Data":"b561d81a4ed39ac8484c78fddd7506e0e5a65c77ffeb8875a313adf199f816b7"} Dec 10 00:48:11 crc kubenswrapper[4884]: I1210 00:48:11.061666 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-logging/collector-drtt8" podStartSLOduration=2.356230343 podStartE2EDuration="22.061641519s" podCreationTimestamp="2025-12-10 00:47:49 +0000 UTC" firstStartedPulling="2025-12-10 00:47:50.5546022 +0000 UTC m=+1043.632559337" lastFinishedPulling="2025-12-10 00:48:10.260013396 +0000 UTC m=+1063.337970513" observedRunningTime="2025-12-10 00:48:11.051930154 +0000 UTC m=+1064.129887281" watchObservedRunningTime="2025-12-10 00:48:11.061641519 +0000 UTC m=+1064.139598646" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.069122 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk"] Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.071921 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.074575 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.082928 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk"] Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.208896 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk\" (UID: \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.208985 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk\" (UID: \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.209028 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tj9z\" (UniqueName: \"kubernetes.io/projected/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-kube-api-access-2tj9z\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk\" (UID: \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.310567 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk\" (UID: \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.310693 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk\" (UID: \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.310752 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tj9z\" (UniqueName: \"kubernetes.io/projected/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-kube-api-access-2tj9z\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk\" (UID: \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.311331 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk\" (UID: \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.311485 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk\" (UID: \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.342735 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tj9z\" (UniqueName: \"kubernetes.io/projected/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-kube-api-access-2tj9z\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk\" (UID: \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.399594 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" Dec 10 00:48:22 crc kubenswrapper[4884]: I1210 00:48:22.828332 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk"] Dec 10 00:48:23 crc kubenswrapper[4884]: I1210 00:48:23.120410 4884 generic.go:334] "Generic (PLEG): container finished" podID="54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" containerID="ee59820445db12dd56488cf5d3b24d25d82fde7004197553a25989e258507ab2" exitCode=0 Dec 10 00:48:23 crc kubenswrapper[4884]: I1210 00:48:23.120548 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" event={"ID":"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8","Type":"ContainerDied","Data":"ee59820445db12dd56488cf5d3b24d25d82fde7004197553a25989e258507ab2"} Dec 10 00:48:23 crc kubenswrapper[4884]: I1210 00:48:23.120701 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" event={"ID":"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8","Type":"ContainerStarted","Data":"8ae3d222e4376c963e54fe8b308e6978f7a55ecd26fe04749094431e8a57d368"} Dec 10 00:48:33 crc kubenswrapper[4884]: I1210 00:48:33.201225 4884 generic.go:334] "Generic (PLEG): container finished" podID="54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" containerID="8b9a2c8ec275ebcc1655e8dd31d23bb2739ab6c515cb3e8d0e994cf322c621c3" exitCode=0 Dec 10 00:48:33 crc kubenswrapper[4884]: I1210 00:48:33.201294 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" event={"ID":"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8","Type":"ContainerDied","Data":"8b9a2c8ec275ebcc1655e8dd31d23bb2739ab6c515cb3e8d0e994cf322c621c3"} Dec 10 00:48:34 crc kubenswrapper[4884]: I1210 00:48:34.213720 4884 generic.go:334] "Generic (PLEG): container finished" podID="54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" containerID="bc75f3c9c50fd9a6090c5132fd8512a4afee85d6293656989465dcd9056ff2bd" exitCode=0 Dec 10 00:48:34 crc kubenswrapper[4884]: I1210 
Dec 10 00:48:35 crc kubenswrapper[4884]: I1210 00:48:35.545063 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk"
Dec 10 00:48:35 crc kubenswrapper[4884]: I1210 00:48:35.604057 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tj9z\" (UniqueName: \"kubernetes.io/projected/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-kube-api-access-2tj9z\") pod \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\" (UID: \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\") "
Dec 10 00:48:35 crc kubenswrapper[4884]: I1210 00:48:35.604136 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-bundle\") pod \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\" (UID: \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\") "
Dec 10 00:48:35 crc kubenswrapper[4884]: I1210 00:48:35.604183 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-util\") pod \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\" (UID: \"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8\") "
Dec 10 00:48:35 crc kubenswrapper[4884]: I1210 00:48:35.604642 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-bundle" (OuterVolumeSpecName: "bundle") pod "54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" (UID: "54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:48:35 crc kubenswrapper[4884]: I1210 00:48:35.608944 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-kube-api-access-2tj9z" (OuterVolumeSpecName: "kube-api-access-2tj9z") pod "54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" (UID: "54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8"). InnerVolumeSpecName "kube-api-access-2tj9z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:48:35 crc kubenswrapper[4884]: I1210 00:48:35.620708 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-util" (OuterVolumeSpecName: "util") pod "54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" (UID: "54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:48:35 crc kubenswrapper[4884]: I1210 00:48:35.705258 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2tj9z\" (UniqueName: \"kubernetes.io/projected/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-kube-api-access-2tj9z\") on node \"crc\" DevicePath \"\"" Dec 10 00:48:35 crc kubenswrapper[4884]: I1210 00:48:35.705291 4884 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:48:35 crc kubenswrapper[4884]: I1210 00:48:35.705300 4884 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8-util\") on node \"crc\" DevicePath \"\"" Dec 10 00:48:36 crc kubenswrapper[4884]: I1210 00:48:36.233186 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" event={"ID":"54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8","Type":"ContainerDied","Data":"8ae3d222e4376c963e54fe8b308e6978f7a55ecd26fe04749094431e8a57d368"} Dec 10 00:48:36 crc kubenswrapper[4884]: I1210 00:48:36.233259 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ae3d222e4376c963e54fe8b308e6978f7a55ecd26fe04749094431e8a57d368" Dec 10 00:48:36 crc kubenswrapper[4884]: I1210 00:48:36.233284 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk" Dec 10 00:48:38 crc kubenswrapper[4884]: I1210 00:48:38.992112 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-2mw5c"] Dec 10 00:48:38 crc kubenswrapper[4884]: E1210 00:48:38.992711 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" containerName="pull" Dec 10 00:48:38 crc kubenswrapper[4884]: I1210 00:48:38.992726 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" containerName="pull" Dec 10 00:48:38 crc kubenswrapper[4884]: E1210 00:48:38.992745 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" containerName="util" Dec 10 00:48:38 crc kubenswrapper[4884]: I1210 00:48:38.992752 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" containerName="util" Dec 10 00:48:38 crc kubenswrapper[4884]: E1210 00:48:38.992765 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" containerName="extract" Dec 10 00:48:38 crc kubenswrapper[4884]: I1210 00:48:38.992772 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" containerName="extract" Dec 10 00:48:38 crc kubenswrapper[4884]: I1210 00:48:38.992893 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8" containerName="extract" Dec 10 00:48:38 crc kubenswrapper[4884]: I1210 00:48:38.993349 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2mw5c" Dec 10 00:48:38 crc kubenswrapper[4884]: I1210 00:48:38.995973 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-rrzgt" Dec 10 00:48:38 crc kubenswrapper[4884]: I1210 00:48:38.996360 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Dec 10 00:48:38 crc kubenswrapper[4884]: I1210 00:48:38.996555 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Dec 10 00:48:39 crc kubenswrapper[4884]: I1210 00:48:39.006753 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-2mw5c"] Dec 10 00:48:39 crc kubenswrapper[4884]: I1210 00:48:39.088799 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crkfw\" (UniqueName: \"kubernetes.io/projected/fe41be34-f133-4e4f-81e9-2d58cde00923-kube-api-access-crkfw\") pod \"nmstate-operator-5b5b58f5c8-2mw5c\" (UID: \"fe41be34-f133-4e4f-81e9-2d58cde00923\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2mw5c" Dec 10 00:48:39 crc kubenswrapper[4884]: I1210 00:48:39.190594 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crkfw\" (UniqueName: \"kubernetes.io/projected/fe41be34-f133-4e4f-81e9-2d58cde00923-kube-api-access-crkfw\") pod \"nmstate-operator-5b5b58f5c8-2mw5c\" (UID: \"fe41be34-f133-4e4f-81e9-2d58cde00923\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2mw5c" Dec 10 00:48:39 crc kubenswrapper[4884]: I1210 00:48:39.223034 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crkfw\" (UniqueName: \"kubernetes.io/projected/fe41be34-f133-4e4f-81e9-2d58cde00923-kube-api-access-crkfw\") pod \"nmstate-operator-5b5b58f5c8-2mw5c\" (UID: \"fe41be34-f133-4e4f-81e9-2d58cde00923\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2mw5c" Dec 10 00:48:39 crc kubenswrapper[4884]: I1210 00:48:39.320054 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2mw5c" Dec 10 00:48:39 crc kubenswrapper[4884]: I1210 00:48:39.775481 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-2mw5c"] Dec 10 00:48:40 crc kubenswrapper[4884]: I1210 00:48:40.267533 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2mw5c" event={"ID":"fe41be34-f133-4e4f-81e9-2d58cde00923","Type":"ContainerStarted","Data":"515ff81bd5ca74b8e8f0f8b4f95a2a67dac42be2624b32ac37b9ee20d958c718"} Dec 10 00:48:42 crc kubenswrapper[4884]: I1210 00:48:42.282900 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2mw5c" event={"ID":"fe41be34-f133-4e4f-81e9-2d58cde00923","Type":"ContainerStarted","Data":"e9d27e1b258ec5e4529c5db970e67c1bbac019f482e656035bf64c5bbd99af1c"} Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.295987 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-2mw5c" podStartSLOduration=3.291048377 podStartE2EDuration="5.295962806s" podCreationTimestamp="2025-12-10 00:48:38 +0000 UTC" firstStartedPulling="2025-12-10 00:48:39.782601619 +0000 UTC m=+1092.860558746" lastFinishedPulling="2025-12-10 00:48:41.787516068 +0000 UTC m=+1094.865473175" observedRunningTime="2025-12-10 00:48:42.309731724 +0000 UTC m=+1095.387688851" watchObservedRunningTime="2025-12-10 00:48:43.295962806 +0000 UTC m=+1096.373919923" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.298775 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-pqmdn"] Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.300064 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pqmdn" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.303199 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-mbh5c" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.310544 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf"] Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.311855 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.313180 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.322992 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-pqmdn"] Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.342030 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-pdjzx"] Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.342821 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.359462 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf"] Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.363131 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d74p9\" (UniqueName: \"kubernetes.io/projected/36ef55f9-d3ad-47fb-99d8-0cfe51482d57-kube-api-access-d74p9\") pod \"nmstate-webhook-5f6d4c5ccb-d4wwf\" (UID: \"36ef55f9-d3ad-47fb-99d8-0cfe51482d57\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.363184 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/36ef55f9-d3ad-47fb-99d8-0cfe51482d57-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-d4wwf\" (UID: \"36ef55f9-d3ad-47fb-99d8-0cfe51482d57\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.363278 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42lrb\" (UniqueName: \"kubernetes.io/projected/d0acd1d8-d03c-467e-8471-481db90de737-kube-api-access-42lrb\") pod \"nmstate-metrics-7f946cbc9-pqmdn\" (UID: \"d0acd1d8-d03c-467e-8471-481db90de737\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pqmdn" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.433137 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk"] Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.433941 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.435717 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.435780 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.435935 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-92ftd" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.452622 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk"] Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.464577 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d74p9\" (UniqueName: \"kubernetes.io/projected/36ef55f9-d3ad-47fb-99d8-0cfe51482d57-kube-api-access-d74p9\") pod \"nmstate-webhook-5f6d4c5ccb-d4wwf\" (UID: \"36ef55f9-d3ad-47fb-99d8-0cfe51482d57\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.464642 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/36ef55f9-d3ad-47fb-99d8-0cfe51482d57-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-d4wwf\" (UID: \"36ef55f9-d3ad-47fb-99d8-0cfe51482d57\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.464678 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/9ede9448-a62c-49f3-8b99-69d971a74e83-ovs-socket\") pod \"nmstate-handler-pdjzx\" (UID: \"9ede9448-a62c-49f3-8b99-69d971a74e83\") " pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.464706 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rllm\" (UniqueName: \"kubernetes.io/projected/9ede9448-a62c-49f3-8b99-69d971a74e83-kube-api-access-6rllm\") pod \"nmstate-handler-pdjzx\" (UID: \"9ede9448-a62c-49f3-8b99-69d971a74e83\") " pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.464742 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/9ede9448-a62c-49f3-8b99-69d971a74e83-dbus-socket\") pod \"nmstate-handler-pdjzx\" (UID: \"9ede9448-a62c-49f3-8b99-69d971a74e83\") " pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.464764 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42lrb\" (UniqueName: \"kubernetes.io/projected/d0acd1d8-d03c-467e-8471-481db90de737-kube-api-access-42lrb\") pod \"nmstate-metrics-7f946cbc9-pqmdn\" (UID: \"d0acd1d8-d03c-467e-8471-481db90de737\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pqmdn" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.464800 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/9ede9448-a62c-49f3-8b99-69d971a74e83-nmstate-lock\") pod \"nmstate-handler-pdjzx\" (UID: 
\"9ede9448-a62c-49f3-8b99-69d971a74e83\") " pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: E1210 00:48:43.464921 4884 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Dec 10 00:48:43 crc kubenswrapper[4884]: E1210 00:48:43.464963 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36ef55f9-d3ad-47fb-99d8-0cfe51482d57-tls-key-pair podName:36ef55f9-d3ad-47fb-99d8-0cfe51482d57 nodeName:}" failed. No retries permitted until 2025-12-10 00:48:43.964947593 +0000 UTC m=+1097.042904710 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/36ef55f9-d3ad-47fb-99d8-0cfe51482d57-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-d4wwf" (UID: "36ef55f9-d3ad-47fb-99d8-0cfe51482d57") : secret "openshift-nmstate-webhook" not found Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.487678 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42lrb\" (UniqueName: \"kubernetes.io/projected/d0acd1d8-d03c-467e-8471-481db90de737-kube-api-access-42lrb\") pod \"nmstate-metrics-7f946cbc9-pqmdn\" (UID: \"d0acd1d8-d03c-467e-8471-481db90de737\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pqmdn" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.492300 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d74p9\" (UniqueName: \"kubernetes.io/projected/36ef55f9-d3ad-47fb-99d8-0cfe51482d57-kube-api-access-d74p9\") pod \"nmstate-webhook-5f6d4c5ccb-d4wwf\" (UID: \"36ef55f9-d3ad-47fb-99d8-0cfe51482d57\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.565619 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rllm\" (UniqueName: \"kubernetes.io/projected/9ede9448-a62c-49f3-8b99-69d971a74e83-kube-api-access-6rllm\") pod \"nmstate-handler-pdjzx\" (UID: \"9ede9448-a62c-49f3-8b99-69d971a74e83\") " pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.565701 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/9ede9448-a62c-49f3-8b99-69d971a74e83-dbus-socket\") pod \"nmstate-handler-pdjzx\" (UID: \"9ede9448-a62c-49f3-8b99-69d971a74e83\") " pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.565738 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/7555d5e8-0cff-4ec4-9b26-dfa45466de89-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-kmxrk\" (UID: \"7555d5e8-0cff-4ec4-9b26-dfa45466de89\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.565794 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swv6g\" (UniqueName: \"kubernetes.io/projected/7555d5e8-0cff-4ec4-9b26-dfa45466de89-kube-api-access-swv6g\") pod \"nmstate-console-plugin-7fbb5f6569-kmxrk\" (UID: \"7555d5e8-0cff-4ec4-9b26-dfa45466de89\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.565824 4884 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/9ede9448-a62c-49f3-8b99-69d971a74e83-nmstate-lock\") pod \"nmstate-handler-pdjzx\" (UID: \"9ede9448-a62c-49f3-8b99-69d971a74e83\") " pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.565896 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/9ede9448-a62c-49f3-8b99-69d971a74e83-ovs-socket\") pod \"nmstate-handler-pdjzx\" (UID: \"9ede9448-a62c-49f3-8b99-69d971a74e83\") " pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.565927 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/7555d5e8-0cff-4ec4-9b26-dfa45466de89-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-kmxrk\" (UID: \"7555d5e8-0cff-4ec4-9b26-dfa45466de89\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.566560 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/9ede9448-a62c-49f3-8b99-69d971a74e83-dbus-socket\") pod \"nmstate-handler-pdjzx\" (UID: \"9ede9448-a62c-49f3-8b99-69d971a74e83\") " pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.566577 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/9ede9448-a62c-49f3-8b99-69d971a74e83-nmstate-lock\") pod \"nmstate-handler-pdjzx\" (UID: \"9ede9448-a62c-49f3-8b99-69d971a74e83\") " pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.566615 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/9ede9448-a62c-49f3-8b99-69d971a74e83-ovs-socket\") pod \"nmstate-handler-pdjzx\" (UID: \"9ede9448-a62c-49f3-8b99-69d971a74e83\") " pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.586033 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rllm\" (UniqueName: \"kubernetes.io/projected/9ede9448-a62c-49f3-8b99-69d971a74e83-kube-api-access-6rllm\") pod \"nmstate-handler-pdjzx\" (UID: \"9ede9448-a62c-49f3-8b99-69d971a74e83\") " pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.623560 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pqmdn" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.642825 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-fb8bc9c44-vbbc8"] Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.651554 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.658208 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.662959 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-fb8bc9c44-vbbc8"] Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.666837 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/7555d5e8-0cff-4ec4-9b26-dfa45466de89-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-kmxrk\" (UID: \"7555d5e8-0cff-4ec4-9b26-dfa45466de89\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.666897 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swv6g\" (UniqueName: \"kubernetes.io/projected/7555d5e8-0cff-4ec4-9b26-dfa45466de89-kube-api-access-swv6g\") pod \"nmstate-console-plugin-7fbb5f6569-kmxrk\" (UID: \"7555d5e8-0cff-4ec4-9b26-dfa45466de89\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.666973 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/7555d5e8-0cff-4ec4-9b26-dfa45466de89-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-kmxrk\" (UID: \"7555d5e8-0cff-4ec4-9b26-dfa45466de89\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.667858 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/7555d5e8-0cff-4ec4-9b26-dfa45466de89-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-kmxrk\" (UID: \"7555d5e8-0cff-4ec4-9b26-dfa45466de89\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.674026 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/7555d5e8-0cff-4ec4-9b26-dfa45466de89-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-kmxrk\" (UID: \"7555d5e8-0cff-4ec4-9b26-dfa45466de89\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.690098 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swv6g\" (UniqueName: \"kubernetes.io/projected/7555d5e8-0cff-4ec4-9b26-dfa45466de89-kube-api-access-swv6g\") pod \"nmstate-console-plugin-7fbb5f6569-kmxrk\" (UID: \"7555d5e8-0cff-4ec4-9b26-dfa45466de89\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.753079 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.772770 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a6857870-023d-4183-9ab2-7c582110e7ad-console-serving-cert\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.772824 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-trusted-ca-bundle\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.772858 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a6857870-023d-4183-9ab2-7c582110e7ad-console-oauth-config\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.772886 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-console-config\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.772929 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-oauth-serving-cert\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.773060 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bm2ht\" (UniqueName: \"kubernetes.io/projected/a6857870-023d-4183-9ab2-7c582110e7ad-kube-api-access-bm2ht\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.773466 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-service-ca\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.874160 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-service-ca\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.874197 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/a6857870-023d-4183-9ab2-7c582110e7ad-console-serving-cert\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.874229 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-trusted-ca-bundle\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.874250 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a6857870-023d-4183-9ab2-7c582110e7ad-console-oauth-config\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.874265 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-console-config\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.874293 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-oauth-serving-cert\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.874345 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bm2ht\" (UniqueName: \"kubernetes.io/projected/a6857870-023d-4183-9ab2-7c582110e7ad-kube-api-access-bm2ht\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.875820 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-service-ca\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.876409 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-oauth-serving-cert\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.876544 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-trusted-ca-bundle\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.876627 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-console-config\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.879891 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a6857870-023d-4183-9ab2-7c582110e7ad-console-oauth-config\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.880119 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a6857870-023d-4183-9ab2-7c582110e7ad-console-serving-cert\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.893815 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm2ht\" (UniqueName: \"kubernetes.io/projected/a6857870-023d-4183-9ab2-7c582110e7ad-kube-api-access-bm2ht\") pod \"console-fb8bc9c44-vbbc8\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") " pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.975786 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/36ef55f9-d3ad-47fb-99d8-0cfe51482d57-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-d4wwf\" (UID: \"36ef55f9-d3ad-47fb-99d8-0cfe51482d57\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" Dec 10 00:48:43 crc kubenswrapper[4884]: I1210 00:48:43.979129 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/36ef55f9-d3ad-47fb-99d8-0cfe51482d57-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-d4wwf\" (UID: \"36ef55f9-d3ad-47fb-99d8-0cfe51482d57\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" Dec 10 00:48:44 crc kubenswrapper[4884]: I1210 00:48:44.004606 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:44 crc kubenswrapper[4884]: I1210 00:48:44.101557 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-pqmdn"] Dec 10 00:48:44 crc kubenswrapper[4884]: I1210 00:48:44.191177 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk"] Dec 10 00:48:44 crc kubenswrapper[4884]: I1210 00:48:44.240779 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" Dec 10 00:48:44 crc kubenswrapper[4884]: I1210 00:48:44.296209 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-pdjzx" event={"ID":"9ede9448-a62c-49f3-8b99-69d971a74e83","Type":"ContainerStarted","Data":"ee410a1c65772008ffc7151b01aeca0217ea18c4cd369ba439553c550bda20ea"} Dec 10 00:48:44 crc kubenswrapper[4884]: I1210 00:48:44.297260 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pqmdn" event={"ID":"d0acd1d8-d03c-467e-8471-481db90de737","Type":"ContainerStarted","Data":"f42b3f37c6270f489ed39636ba9232cbfe147e18ffabdd7c67d49360efcb681b"} Dec 10 00:48:44 crc kubenswrapper[4884]: I1210 00:48:44.297927 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" event={"ID":"7555d5e8-0cff-4ec4-9b26-dfa45466de89","Type":"ContainerStarted","Data":"d4fe6c2181337309a3b7c3e80993c4ab23d8d554178de80704870504033b7d34"} Dec 10 00:48:44 crc kubenswrapper[4884]: I1210 00:48:44.455695 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-fb8bc9c44-vbbc8"] Dec 10 00:48:44 crc kubenswrapper[4884]: W1210 00:48:44.462852 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6857870_023d_4183_9ab2_7c582110e7ad.slice/crio-17f07a63d2bcec616ccf04ee3c3c4359aecd73b2a83dc851cf61c2aca3fc0dbf WatchSource:0}: Error finding container 17f07a63d2bcec616ccf04ee3c3c4359aecd73b2a83dc851cf61c2aca3fc0dbf: Status 404 returned error can't find the container with id 17f07a63d2bcec616ccf04ee3c3c4359aecd73b2a83dc851cf61c2aca3fc0dbf Dec 10 00:48:44 crc kubenswrapper[4884]: I1210 00:48:44.707994 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf"] Dec 10 00:48:45 crc kubenswrapper[4884]: I1210 00:48:45.310854 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-fb8bc9c44-vbbc8" event={"ID":"a6857870-023d-4183-9ab2-7c582110e7ad","Type":"ContainerStarted","Data":"183b2f18ab1f60e64e0c910ceaf14a8e412fa52ee5708efd014b957d18517446"} Dec 10 00:48:45 crc kubenswrapper[4884]: I1210 00:48:45.310905 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-fb8bc9c44-vbbc8" event={"ID":"a6857870-023d-4183-9ab2-7c582110e7ad","Type":"ContainerStarted","Data":"17f07a63d2bcec616ccf04ee3c3c4359aecd73b2a83dc851cf61c2aca3fc0dbf"} Dec 10 00:48:45 crc kubenswrapper[4884]: I1210 00:48:45.315759 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" event={"ID":"36ef55f9-d3ad-47fb-99d8-0cfe51482d57","Type":"ContainerStarted","Data":"4e166eeaa6e724a2bec910624a6afd664fdc8a526899450c19b7ca6608430ef4"} Dec 10 00:48:45 crc kubenswrapper[4884]: I1210 00:48:45.335909 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-fb8bc9c44-vbbc8" podStartSLOduration=2.335888969 podStartE2EDuration="2.335888969s" podCreationTimestamp="2025-12-10 00:48:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:48:45.331330159 +0000 UTC m=+1098.409287296" watchObservedRunningTime="2025-12-10 00:48:45.335888969 +0000 UTC m=+1098.413846086" Dec 10 00:48:48 crc kubenswrapper[4884]: I1210 00:48:48.097946 4884 
Dec 10 00:48:48 crc kubenswrapper[4884]: I1210 00:48:48.098342 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 10 00:48:48 crc kubenswrapper[4884]: I1210 00:48:48.339256 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" event={"ID":"7555d5e8-0cff-4ec4-9b26-dfa45466de89","Type":"ContainerStarted","Data":"2a41cecab79c2b6ee444db516982023a0fd4b6d83ee8cf5923dccb927155b088"}
Dec 10 00:48:48 crc kubenswrapper[4884]: I1210 00:48:48.341185 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-pdjzx" event={"ID":"9ede9448-a62c-49f3-8b99-69d971a74e83","Type":"ContainerStarted","Data":"bfbba2868fe3f679fceefd0c06f60f1129122682d4472060a529e4b288be686e"}
Dec 10 00:48:48 crc kubenswrapper[4884]: I1210 00:48:48.341310 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-pdjzx"
Dec 10 00:48:48 crc kubenswrapper[4884]: I1210 00:48:48.343192 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pqmdn" event={"ID":"d0acd1d8-d03c-467e-8471-481db90de737","Type":"ContainerStarted","Data":"2dabd3a701bcb593772754fc72a65b13d4a7b4e74fc164bed46e2aa12cfe183f"}
Dec 10 00:48:48 crc kubenswrapper[4884]: I1210 00:48:48.344730 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" event={"ID":"36ef55f9-d3ad-47fb-99d8-0cfe51482d57","Type":"ContainerStarted","Data":"7db9347165b02dcf6178e0265c953f01ecd7a4507e2f45ac13d6e3a7e67b93af"}
Dec 10 00:48:48 crc kubenswrapper[4884]: I1210 00:48:48.344879 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf"
Dec 10 00:48:48 crc kubenswrapper[4884]: I1210 00:48:48.354600 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-kmxrk" podStartSLOduration=1.7908051230000002 podStartE2EDuration="5.354575678s" podCreationTimestamp="2025-12-10 00:48:43 +0000 UTC" firstStartedPulling="2025-12-10 00:48:44.206127132 +0000 UTC m=+1097.284084249" lastFinishedPulling="2025-12-10 00:48:47.769897687 +0000 UTC m=+1100.847854804" observedRunningTime="2025-12-10 00:48:48.351317131 +0000 UTC m=+1101.429274258" watchObservedRunningTime="2025-12-10 00:48:48.354575678 +0000 UTC m=+1101.432532825"
Dec 10 00:48:48 crc kubenswrapper[4884]: I1210 00:48:48.385589 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" podStartSLOduration=2.324312284 podStartE2EDuration="5.385564495s" podCreationTimestamp="2025-12-10 00:48:43 +0000 UTC" firstStartedPulling="2025-12-10 00:48:44.711544932 +0000 UTC m=+1097.789502049" lastFinishedPulling="2025-12-10 00:48:47.772797143 +0000 UTC m=+1100.850754260" observedRunningTime="2025-12-10 00:48:48.369491381 +0000 UTC m=+1101.447448508" watchObservedRunningTime="2025-12-10 00:48:48.385564495 +0000 UTC m=+1101.463521612"
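[Annotation] The machine-config-daemon entries show a liveness probe failing at the transport level: the GET to http://127.0.0.1:8798/health is refused outright, so no body arrives (start-of-body= is empty). A minimal sketch of the check an HTTP prober performs, assuming the usual convention that a 2xx/3xx status is healthy; the timeout value is illustrative:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func probe(url string) error {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. dial tcp 127.0.0.1:8798: connect: connection refused
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probe("http://127.0.0.1:8798/health"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}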
watchObservedRunningTime="2025-12-10 00:48:48.385564495 +0000 UTC m=+1101.463521612" Dec 10 00:48:48 crc kubenswrapper[4884]: I1210 00:48:48.396071 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-pdjzx" podStartSLOduration=1.319931092 podStartE2EDuration="5.396056301s" podCreationTimestamp="2025-12-10 00:48:43 +0000 UTC" firstStartedPulling="2025-12-10 00:48:43.699129229 +0000 UTC m=+1096.777086336" lastFinishedPulling="2025-12-10 00:48:47.775254428 +0000 UTC m=+1100.853211545" observedRunningTime="2025-12-10 00:48:48.393671038 +0000 UTC m=+1101.471628165" watchObservedRunningTime="2025-12-10 00:48:48.396056301 +0000 UTC m=+1101.474013418" Dec 10 00:48:52 crc kubenswrapper[4884]: I1210 00:48:52.388075 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pqmdn" event={"ID":"d0acd1d8-d03c-467e-8471-481db90de737","Type":"ContainerStarted","Data":"292a48613f5f64fdb1b5055eb6652bb8a293c050a3c4211b0db8bf7c5cd20cc2"} Dec 10 00:48:52 crc kubenswrapper[4884]: I1210 00:48:52.417906 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-pqmdn" podStartSLOduration=1.990123569 podStartE2EDuration="9.417881148s" podCreationTimestamp="2025-12-10 00:48:43 +0000 UTC" firstStartedPulling="2025-12-10 00:48:44.126531942 +0000 UTC m=+1097.204489059" lastFinishedPulling="2025-12-10 00:48:51.554289521 +0000 UTC m=+1104.632246638" observedRunningTime="2025-12-10 00:48:52.412886536 +0000 UTC m=+1105.490843713" watchObservedRunningTime="2025-12-10 00:48:52.417881148 +0000 UTC m=+1105.495838305" Dec 10 00:48:53 crc kubenswrapper[4884]: I1210 00:48:53.702805 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-pdjzx" Dec 10 00:48:54 crc kubenswrapper[4884]: I1210 00:48:54.005743 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:54 crc kubenswrapper[4884]: I1210 00:48:54.005808 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:54 crc kubenswrapper[4884]: I1210 00:48:54.012511 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:54 crc kubenswrapper[4884]: I1210 00:48:54.413340 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:48:54 crc kubenswrapper[4884]: I1210 00:48:54.502536 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5hrps"] Dec 10 00:49:04 crc kubenswrapper[4884]: I1210 00:49:04.254328 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-d4wwf" Dec 10 00:49:18 crc kubenswrapper[4884]: I1210 00:49:18.098341 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:49:18 crc kubenswrapper[4884]: I1210 00:49:18.099030 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:49:19 crc kubenswrapper[4884]: I1210 00:49:19.554545 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-5hrps" podUID="34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" containerName="console" containerID="cri-o://4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2" gracePeriod=15 Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.002268 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5hrps_34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9/console/0.log" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.002606 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5hrps" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.085839 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-oauth-serving-cert\") pod \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.085938 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-oauth-config\") pod \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.085989 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-trusted-ca-bundle\") pod \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.086082 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-serving-cert\") pod \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.086131 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-config\") pod \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.086194 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-service-ca\") pod \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.086232 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzdrj\" (UniqueName: \"kubernetes.io/projected/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-kube-api-access-tzdrj\") pod \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\" (UID: \"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9\") " Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.086816 4884 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" (UID: "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.086852 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-config" (OuterVolumeSpecName: "console-config") pod "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" (UID: "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.086894 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" (UID: "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.086916 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-service-ca" (OuterVolumeSpecName: "service-ca") pod "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" (UID: "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.091781 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-kube-api-access-tzdrj" (OuterVolumeSpecName: "kube-api-access-tzdrj") pod "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" (UID: "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9"). InnerVolumeSpecName "kube-api-access-tzdrj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.091839 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" (UID: "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.092372 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" (UID: "34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.188021 4884 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.188063 4884 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.188078 4884 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-service-ca\") on node \"crc\" DevicePath \"\"" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.188089 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzdrj\" (UniqueName: \"kubernetes.io/projected/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-kube-api-access-tzdrj\") on node \"crc\" DevicePath \"\"" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.188102 4884 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.188115 4884 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.188125 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.620260 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5hrps_34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9/console/0.log" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.620326 4884 generic.go:334] "Generic (PLEG): container finished" podID="34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" containerID="4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2" exitCode=2 Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.620360 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5hrps" event={"ID":"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9","Type":"ContainerDied","Data":"4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2"} Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.620392 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5hrps" event={"ID":"34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9","Type":"ContainerDied","Data":"22a6b40860876c379ee679c335da49b2f29bb3d5823932e71f788bbeb1725d37"} Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.620411 4884 scope.go:117] "RemoveContainer" containerID="4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.620460 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-5hrps" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.645346 4884 scope.go:117] "RemoveContainer" containerID="4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2" Dec 10 00:49:20 crc kubenswrapper[4884]: E1210 00:49:20.645877 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2\": container with ID starting with 4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2 not found: ID does not exist" containerID="4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.646119 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2"} err="failed to get container status \"4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2\": rpc error: code = NotFound desc = could not find container \"4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2\": container with ID starting with 4d24b269df8e71a83ea794bc375c67c6a310ac2aba678db0a92d3144db54ccd2 not found: ID does not exist" Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.671638 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5hrps"] Dec 10 00:49:20 crc kubenswrapper[4884]: I1210 00:49:20.677680 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-5hrps"] Dec 10 00:49:21 crc kubenswrapper[4884]: I1210 00:49:21.300150 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" path="/var/lib/kubelet/pods/34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9/volumes" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.411331 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx"] Dec 10 00:49:22 crc kubenswrapper[4884]: E1210 00:49:22.411690 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" containerName="console" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.411705 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" containerName="console" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.411851 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="34e2ea5d-ec04-49d2-a1cb-1c94b696d5a9" containerName="console" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.412855 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.421195 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx"] Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.422276 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.525484 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5x6v\" (UniqueName: \"kubernetes.io/projected/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-kube-api-access-j5x6v\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx\" (UID: \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.525603 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx\" (UID: \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.525685 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx\" (UID: \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.627111 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx\" (UID: \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.627211 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5x6v\" (UniqueName: \"kubernetes.io/projected/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-kube-api-access-j5x6v\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx\" (UID: \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.627268 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx\" (UID: \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.627870 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx\" (UID: \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.627980 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx\" (UID: \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.651792 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5x6v\" (UniqueName: \"kubernetes.io/projected/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-kube-api-access-j5x6v\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx\" (UID: \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.737844 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:22 crc kubenswrapper[4884]: I1210 00:49:22.989232 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx"] Dec 10 00:49:23 crc kubenswrapper[4884]: I1210 00:49:23.652000 4884 generic.go:334] "Generic (PLEG): container finished" podID="8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" containerID="3443a8f40cb33dd7ec244975a65fe055caa592acd3eb2e84a126b9888a1b64cb" exitCode=0 Dec 10 00:49:23 crc kubenswrapper[4884]: I1210 00:49:23.652152 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" event={"ID":"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba","Type":"ContainerDied","Data":"3443a8f40cb33dd7ec244975a65fe055caa592acd3eb2e84a126b9888a1b64cb"} Dec 10 00:49:23 crc kubenswrapper[4884]: I1210 00:49:23.652349 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" event={"ID":"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba","Type":"ContainerStarted","Data":"053b7fed5fa46bb8b778451079506e89de59c861dc5c7abe9ad06d57c4f0820a"} Dec 10 00:49:23 crc kubenswrapper[4884]: I1210 00:49:23.654890 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 00:49:25 crc kubenswrapper[4884]: I1210 00:49:25.666654 4884 generic.go:334] "Generic (PLEG): container finished" podID="8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" containerID="652e707f8e6dad7a2f824be271c8189f36786acf13565c138d88da99054d5fbe" exitCode=0 Dec 10 00:49:25 crc kubenswrapper[4884]: I1210 00:49:25.667060 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" event={"ID":"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba","Type":"ContainerDied","Data":"652e707f8e6dad7a2f824be271c8189f36786acf13565c138d88da99054d5fbe"} Dec 10 00:49:26 crc kubenswrapper[4884]: I1210 00:49:26.675587 4884 generic.go:334] "Generic (PLEG): container finished" 
podID="8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" containerID="2ab7af9e4379a1a53216e94fd0fd35be2f1b9531b7129d1fb3e485ba7d6c982c" exitCode=0 Dec 10 00:49:26 crc kubenswrapper[4884]: I1210 00:49:26.675632 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" event={"ID":"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba","Type":"ContainerDied","Data":"2ab7af9e4379a1a53216e94fd0fd35be2f1b9531b7129d1fb3e485ba7d6c982c"} Dec 10 00:49:27 crc kubenswrapper[4884]: I1210 00:49:27.978930 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:28 crc kubenswrapper[4884]: I1210 00:49:28.038497 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-util\") pod \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\" (UID: \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\") " Dec 10 00:49:28 crc kubenswrapper[4884]: I1210 00:49:28.038564 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5x6v\" (UniqueName: \"kubernetes.io/projected/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-kube-api-access-j5x6v\") pod \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\" (UID: \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\") " Dec 10 00:49:28 crc kubenswrapper[4884]: I1210 00:49:28.038603 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-bundle\") pod \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\" (UID: \"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba\") " Dec 10 00:49:28 crc kubenswrapper[4884]: I1210 00:49:28.039593 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-bundle" (OuterVolumeSpecName: "bundle") pod "8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" (UID: "8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:49:28 crc kubenswrapper[4884]: I1210 00:49:28.046694 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-kube-api-access-j5x6v" (OuterVolumeSpecName: "kube-api-access-j5x6v") pod "8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" (UID: "8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba"). InnerVolumeSpecName "kube-api-access-j5x6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:49:28 crc kubenswrapper[4884]: I1210 00:49:28.056475 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-util" (OuterVolumeSpecName: "util") pod "8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" (UID: "8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:49:28 crc kubenswrapper[4884]: I1210 00:49:28.139887 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5x6v\" (UniqueName: \"kubernetes.io/projected/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-kube-api-access-j5x6v\") on node \"crc\" DevicePath \"\"" Dec 10 00:49:28 crc kubenswrapper[4884]: I1210 00:49:28.139921 4884 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:49:28 crc kubenswrapper[4884]: I1210 00:49:28.139929 4884 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba-util\") on node \"crc\" DevicePath \"\"" Dec 10 00:49:28 crc kubenswrapper[4884]: I1210 00:49:28.710482 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" event={"ID":"8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba","Type":"ContainerDied","Data":"053b7fed5fa46bb8b778451079506e89de59c861dc5c7abe9ad06d57c4f0820a"} Dec 10 00:49:28 crc kubenswrapper[4884]: I1210 00:49:28.710538 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="053b7fed5fa46bb8b778451079506e89de59c861dc5c7abe9ad06d57c4f0820a" Dec 10 00:49:28 crc kubenswrapper[4884]: I1210 00:49:28.710593 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.016272 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-866896889b-hszbz"] Dec 10 00:49:38 crc kubenswrapper[4884]: E1210 00:49:38.017845 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" containerName="extract" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.017918 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" containerName="extract" Dec 10 00:49:38 crc kubenswrapper[4884]: E1210 00:49:38.017987 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" containerName="util" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.018041 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" containerName="util" Dec 10 00:49:38 crc kubenswrapper[4884]: E1210 00:49:38.018098 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" containerName="pull" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.018149 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" containerName="pull" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.018318 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba" containerName="extract" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.018837 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.022009 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.022294 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.023938 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.024003 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-r6fhb" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.024310 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.080959 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-866896889b-hszbz"] Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.100714 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cfrt\" (UniqueName: \"kubernetes.io/projected/23b67b7c-b959-4f92-96c1-17daa41985c9-kube-api-access-7cfrt\") pod \"metallb-operator-controller-manager-866896889b-hszbz\" (UID: \"23b67b7c-b959-4f92-96c1-17daa41985c9\") " pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.100787 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/23b67b7c-b959-4f92-96c1-17daa41985c9-apiservice-cert\") pod \"metallb-operator-controller-manager-866896889b-hszbz\" (UID: \"23b67b7c-b959-4f92-96c1-17daa41985c9\") " pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.100863 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/23b67b7c-b959-4f92-96c1-17daa41985c9-webhook-cert\") pod \"metallb-operator-controller-manager-866896889b-hszbz\" (UID: \"23b67b7c-b959-4f92-96c1-17daa41985c9\") " pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.202700 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/23b67b7c-b959-4f92-96c1-17daa41985c9-webhook-cert\") pod \"metallb-operator-controller-manager-866896889b-hszbz\" (UID: \"23b67b7c-b959-4f92-96c1-17daa41985c9\") " pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.202797 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cfrt\" (UniqueName: \"kubernetes.io/projected/23b67b7c-b959-4f92-96c1-17daa41985c9-kube-api-access-7cfrt\") pod \"metallb-operator-controller-manager-866896889b-hszbz\" (UID: \"23b67b7c-b959-4f92-96c1-17daa41985c9\") " pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.202846 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/23b67b7c-b959-4f92-96c1-17daa41985c9-apiservice-cert\") pod \"metallb-operator-controller-manager-866896889b-hszbz\" (UID: \"23b67b7c-b959-4f92-96c1-17daa41985c9\") " pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.209887 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/23b67b7c-b959-4f92-96c1-17daa41985c9-apiservice-cert\") pod \"metallb-operator-controller-manager-866896889b-hszbz\" (UID: \"23b67b7c-b959-4f92-96c1-17daa41985c9\") " pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.222135 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/23b67b7c-b959-4f92-96c1-17daa41985c9-webhook-cert\") pod \"metallb-operator-controller-manager-866896889b-hszbz\" (UID: \"23b67b7c-b959-4f92-96c1-17daa41985c9\") " pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.222475 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cfrt\" (UniqueName: \"kubernetes.io/projected/23b67b7c-b959-4f92-96c1-17daa41985c9-kube-api-access-7cfrt\") pod \"metallb-operator-controller-manager-866896889b-hszbz\" (UID: \"23b67b7c-b959-4f92-96c1-17daa41985c9\") " pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.277383 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt"] Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.278706 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.281468 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-vcd8k" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.281696 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.282502 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.304283 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt"] Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.333230 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.406064 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4377a5f5-ae57-4c9a-9e25-a69371de7097-webhook-cert\") pod \"metallb-operator-webhook-server-556949cfd9-7msjt\" (UID: \"4377a5f5-ae57-4c9a-9e25-a69371de7097\") " pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.406242 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4377a5f5-ae57-4c9a-9e25-a69371de7097-apiservice-cert\") pod \"metallb-operator-webhook-server-556949cfd9-7msjt\" (UID: \"4377a5f5-ae57-4c9a-9e25-a69371de7097\") " pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.406315 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bhgw\" (UniqueName: \"kubernetes.io/projected/4377a5f5-ae57-4c9a-9e25-a69371de7097-kube-api-access-5bhgw\") pod \"metallb-operator-webhook-server-556949cfd9-7msjt\" (UID: \"4377a5f5-ae57-4c9a-9e25-a69371de7097\") " pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.507638 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4377a5f5-ae57-4c9a-9e25-a69371de7097-webhook-cert\") pod \"metallb-operator-webhook-server-556949cfd9-7msjt\" (UID: \"4377a5f5-ae57-4c9a-9e25-a69371de7097\") " pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.508673 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4377a5f5-ae57-4c9a-9e25-a69371de7097-apiservice-cert\") pod \"metallb-operator-webhook-server-556949cfd9-7msjt\" (UID: \"4377a5f5-ae57-4c9a-9e25-a69371de7097\") " pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.508767 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bhgw\" (UniqueName: \"kubernetes.io/projected/4377a5f5-ae57-4c9a-9e25-a69371de7097-kube-api-access-5bhgw\") pod \"metallb-operator-webhook-server-556949cfd9-7msjt\" (UID: \"4377a5f5-ae57-4c9a-9e25-a69371de7097\") " pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.512608 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4377a5f5-ae57-4c9a-9e25-a69371de7097-webhook-cert\") pod \"metallb-operator-webhook-server-556949cfd9-7msjt\" (UID: \"4377a5f5-ae57-4c9a-9e25-a69371de7097\") " pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.513470 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4377a5f5-ae57-4c9a-9e25-a69371de7097-apiservice-cert\") pod \"metallb-operator-webhook-server-556949cfd9-7msjt\" (UID: \"4377a5f5-ae57-4c9a-9e25-a69371de7097\") " 
pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.532185 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bhgw\" (UniqueName: \"kubernetes.io/projected/4377a5f5-ae57-4c9a-9e25-a69371de7097-kube-api-access-5bhgw\") pod \"metallb-operator-webhook-server-556949cfd9-7msjt\" (UID: \"4377a5f5-ae57-4c9a-9e25-a69371de7097\") " pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.593877 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" Dec 10 00:49:38 crc kubenswrapper[4884]: I1210 00:49:38.781512 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-866896889b-hszbz"] Dec 10 00:49:38 crc kubenswrapper[4884]: W1210 00:49:38.789731 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod23b67b7c_b959_4f92_96c1_17daa41985c9.slice/crio-2fae3a2059a743bad0b6e1b24634fd94249e61f572340e3e57761bd6adcab963 WatchSource:0}: Error finding container 2fae3a2059a743bad0b6e1b24634fd94249e61f572340e3e57761bd6adcab963: Status 404 returned error can't find the container with id 2fae3a2059a743bad0b6e1b24634fd94249e61f572340e3e57761bd6adcab963 Dec 10 00:49:39 crc kubenswrapper[4884]: I1210 00:49:39.065815 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt"] Dec 10 00:49:39 crc kubenswrapper[4884]: W1210 00:49:39.076465 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4377a5f5_ae57_4c9a_9e25_a69371de7097.slice/crio-cc7dafc4775a395dad8cf0357fbfb7343f1e4ec9ffa5a24418184e1c8f90b508 WatchSource:0}: Error finding container cc7dafc4775a395dad8cf0357fbfb7343f1e4ec9ffa5a24418184e1c8f90b508: Status 404 returned error can't find the container with id cc7dafc4775a395dad8cf0357fbfb7343f1e4ec9ffa5a24418184e1c8f90b508 Dec 10 00:49:39 crc kubenswrapper[4884]: I1210 00:49:39.797184 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" event={"ID":"23b67b7c-b959-4f92-96c1-17daa41985c9","Type":"ContainerStarted","Data":"2fae3a2059a743bad0b6e1b24634fd94249e61f572340e3e57761bd6adcab963"} Dec 10 00:49:39 crc kubenswrapper[4884]: I1210 00:49:39.798815 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" event={"ID":"4377a5f5-ae57-4c9a-9e25-a69371de7097","Type":"ContainerStarted","Data":"cc7dafc4775a395dad8cf0357fbfb7343f1e4ec9ffa5a24418184e1c8f90b508"} Dec 10 00:49:44 crc kubenswrapper[4884]: I1210 00:49:44.837669 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" event={"ID":"23b67b7c-b959-4f92-96c1-17daa41985c9","Type":"ContainerStarted","Data":"7c8047d22951ece7ce4b53af92a467eb2a7e167021dfc795bf7165a9ce8a1518"} Dec 10 00:49:44 crc kubenswrapper[4884]: I1210 00:49:44.838171 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" Dec 10 00:49:44 crc kubenswrapper[4884]: I1210 00:49:44.839502 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" event={"ID":"4377a5f5-ae57-4c9a-9e25-a69371de7097","Type":"ContainerStarted","Data":"78eee456c85f26cee24ed494e79f353a098cd78cafba11c2f2f8341ed31132c9"} Dec 10 00:49:44 crc kubenswrapper[4884]: I1210 00:49:44.839656 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" Dec 10 00:49:44 crc kubenswrapper[4884]: I1210 00:49:44.871090 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz" podStartSLOduration=1.286808934 podStartE2EDuration="6.871045028s" podCreationTimestamp="2025-12-10 00:49:38 +0000 UTC" firstStartedPulling="2025-12-10 00:49:38.791761717 +0000 UTC m=+1151.869718834" lastFinishedPulling="2025-12-10 00:49:44.375997791 +0000 UTC m=+1157.453954928" observedRunningTime="2025-12-10 00:49:44.868211304 +0000 UTC m=+1157.946168521" watchObservedRunningTime="2025-12-10 00:49:44.871045028 +0000 UTC m=+1157.949002135" Dec 10 00:49:44 crc kubenswrapper[4884]: I1210 00:49:44.888925 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt" podStartSLOduration=1.568582056 podStartE2EDuration="6.888898489s" podCreationTimestamp="2025-12-10 00:49:38 +0000 UTC" firstStartedPulling="2025-12-10 00:49:39.07925271 +0000 UTC m=+1152.157209827" lastFinishedPulling="2025-12-10 00:49:44.399569133 +0000 UTC m=+1157.477526260" observedRunningTime="2025-12-10 00:49:44.88818266 +0000 UTC m=+1157.966139847" watchObservedRunningTime="2025-12-10 00:49:44.888898489 +0000 UTC m=+1157.966855646" Dec 10 00:49:48 crc kubenswrapper[4884]: I1210 00:49:48.098226 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:49:48 crc kubenswrapper[4884]: I1210 00:49:48.098619 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:49:48 crc kubenswrapper[4884]: I1210 00:49:48.098670 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:49:48 crc kubenswrapper[4884]: I1210 00:49:48.099350 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"85d53d23856c70f8c66a5944018254f82db2773d6bfbe9b53696ef024e9cffda"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 00:49:48 crc kubenswrapper[4884]: I1210 00:49:48.099408 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://85d53d23856c70f8c66a5944018254f82db2773d6bfbe9b53696ef024e9cffda" gracePeriod=600 Dec 10 00:49:48 crc kubenswrapper[4884]: 
Dec 10 00:49:48 crc kubenswrapper[4884]: I1210 00:49:48.871628 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"85d53d23856c70f8c66a5944018254f82db2773d6bfbe9b53696ef024e9cffda"}
Dec 10 00:49:48 crc kubenswrapper[4884]: I1210 00:49:48.871871 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"c301699d2f400451cf0d2a7c3e824313cd2be40d6ee80bb600b0e9f8df69938a"}
Dec 10 00:49:48 crc kubenswrapper[4884]: I1210 00:49:48.871915 4884 scope.go:117] "RemoveContainer" containerID="fb549c86290e5eaaf8d0b787f8b183ae212b7154cabf782092b91c5f95b4ef4c"
Dec 10 00:49:58 crc kubenswrapper[4884]: I1210 00:49:58.604195 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-556949cfd9-7msjt"
Dec 10 00:50:18 crc kubenswrapper[4884]: I1210 00:50:18.335755 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-866896889b-hszbz"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.211164 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-5gsrt"]
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.214418 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.216301 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.216753 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-bg28g"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.225655 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.226944 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf"]
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.228317 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.232804 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.236952 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf"]
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.256912 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc974\" (UniqueName: \"kubernetes.io/projected/8da05654-9ed1-48fc-8d0f-b507abeda5f0-kube-api-access-lc974\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.256966 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/8da05654-9ed1-48fc-8d0f-b507abeda5f0-frr-sockets\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.257003 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8da05654-9ed1-48fc-8d0f-b507abeda5f0-metrics-certs\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.257026 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/8da05654-9ed1-48fc-8d0f-b507abeda5f0-metrics\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.257157 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkq5b\" (UniqueName: \"kubernetes.io/projected/e737cf7c-81d6-4aa0-b9c3-c3f2de596724-kube-api-access-hkq5b\") pod \"frr-k8s-webhook-server-7fcb986d4-tvnsf\" (UID: \"e737cf7c-81d6-4aa0-b9c3-c3f2de596724\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.257220 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/8da05654-9ed1-48fc-8d0f-b507abeda5f0-reloader\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.257281 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e737cf7c-81d6-4aa0-b9c3-c3f2de596724-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-tvnsf\" (UID: \"e737cf7c-81d6-4aa0-b9c3-c3f2de596724\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.257337 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/8da05654-9ed1-48fc-8d0f-b507abeda5f0-frr-startup\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.257364 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/8da05654-9ed1-48fc-8d0f-b507abeda5f0-frr-conf\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.309237 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-2bbfm"]
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.310360 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.312920 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.313156 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.313375 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-kzbjw"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.313526 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.339012 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-wt5kv"]
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.341090 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-wt5kv"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.345195 4884 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.350579 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-wt5kv"]
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.357877 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lc974\" (UniqueName: \"kubernetes.io/projected/8da05654-9ed1-48fc-8d0f-b507abeda5f0-kube-api-access-lc974\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.357915 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-memberlist\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.357946 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4nvp\" (UniqueName: \"kubernetes.io/projected/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-kube-api-access-m4nvp\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.357965 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfwxf\" (UniqueName: \"kubernetes.io/projected/cefdd4d8-632f-4912-b381-8d5df050aa30-kube-api-access-vfwxf\") pod \"controller-f8648f98b-wt5kv\" (UID: \"cefdd4d8-632f-4912-b381-8d5df050aa30\") " pod="metallb-system/controller-f8648f98b-wt5kv"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.357987 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/8da05654-9ed1-48fc-8d0f-b507abeda5f0-frr-sockets\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.358010 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cefdd4d8-632f-4912-b381-8d5df050aa30-cert\") pod \"controller-f8648f98b-wt5kv\" (UID: \"cefdd4d8-632f-4912-b381-8d5df050aa30\") " pod="metallb-system/controller-f8648f98b-wt5kv"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.358028 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cefdd4d8-632f-4912-b381-8d5df050aa30-metrics-certs\") pod \"controller-f8648f98b-wt5kv\" (UID: \"cefdd4d8-632f-4912-b381-8d5df050aa30\") " pod="metallb-system/controller-f8648f98b-wt5kv"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.358048 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8da05654-9ed1-48fc-8d0f-b507abeda5f0-metrics-certs\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.358085 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/8da05654-9ed1-48fc-8d0f-b507abeda5f0-metrics\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.358109 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-metrics-certs\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.358128 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkq5b\" (UniqueName: \"kubernetes.io/projected/e737cf7c-81d6-4aa0-b9c3-c3f2de596724-kube-api-access-hkq5b\") pod \"frr-k8s-webhook-server-7fcb986d4-tvnsf\" (UID: \"e737cf7c-81d6-4aa0-b9c3-c3f2de596724\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.358145 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/8da05654-9ed1-48fc-8d0f-b507abeda5f0-reloader\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.358182 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e737cf7c-81d6-4aa0-b9c3-c3f2de596724-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-tvnsf\" (UID: \"e737cf7c-81d6-4aa0-b9c3-c3f2de596724\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.358206 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/8da05654-9ed1-48fc-8d0f-b507abeda5f0-frr-startup\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.358219 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/8da05654-9ed1-48fc-8d0f-b507abeda5f0-frr-conf\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.358239 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-metallb-excludel2\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.359549 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/8da05654-9ed1-48fc-8d0f-b507abeda5f0-frr-conf\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.359637 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/8da05654-9ed1-48fc-8d0f-b507abeda5f0-frr-startup\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.360548 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/8da05654-9ed1-48fc-8d0f-b507abeda5f0-frr-sockets\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.360579 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/8da05654-9ed1-48fc-8d0f-b507abeda5f0-metrics\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.360824 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/8da05654-9ed1-48fc-8d0f-b507abeda5f0-reloader\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.367932 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e737cf7c-81d6-4aa0-b9c3-c3f2de596724-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-tvnsf\" (UID: \"e737cf7c-81d6-4aa0-b9c3-c3f2de596724\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.377997 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8da05654-9ed1-48fc-8d0f-b507abeda5f0-metrics-certs\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.378465 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lc974\" (UniqueName: \"kubernetes.io/projected/8da05654-9ed1-48fc-8d0f-b507abeda5f0-kube-api-access-lc974\") pod \"frr-k8s-5gsrt\" (UID: \"8da05654-9ed1-48fc-8d0f-b507abeda5f0\") " pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.381272 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkq5b\" (UniqueName: \"kubernetes.io/projected/e737cf7c-81d6-4aa0-b9c3-c3f2de596724-kube-api-access-hkq5b\") pod \"frr-k8s-webhook-server-7fcb986d4-tvnsf\" (UID: \"e737cf7c-81d6-4aa0-b9c3-c3f2de596724\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.459386 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-metrics-certs\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.459491 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-metallb-excludel2\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.459530 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-memberlist\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.459555 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4nvp\" (UniqueName: \"kubernetes.io/projected/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-kube-api-access-m4nvp\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.459572 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfwxf\" (UniqueName: \"kubernetes.io/projected/cefdd4d8-632f-4912-b381-8d5df050aa30-kube-api-access-vfwxf\") pod \"controller-f8648f98b-wt5kv\" (UID: \"cefdd4d8-632f-4912-b381-8d5df050aa30\") " pod="metallb-system/controller-f8648f98b-wt5kv"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.459605 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cefdd4d8-632f-4912-b381-8d5df050aa30-cert\") pod \"controller-f8648f98b-wt5kv\" (UID: \"cefdd4d8-632f-4912-b381-8d5df050aa30\") " pod="metallb-system/controller-f8648f98b-wt5kv"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.459621 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cefdd4d8-632f-4912-b381-8d5df050aa30-metrics-certs\") pod \"controller-f8648f98b-wt5kv\" (UID: \"cefdd4d8-632f-4912-b381-8d5df050aa30\") " pod="metallb-system/controller-f8648f98b-wt5kv"
Dec 10 00:50:19 crc kubenswrapper[4884]: E1210 00:50:19.460080 4884 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Dec 10 00:50:19 crc kubenswrapper[4884]: E1210 00:50:19.460159 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-memberlist podName:7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660 nodeName:}" failed. No retries permitted until 2025-12-10 00:50:19.96013928 +0000 UTC m=+1193.038096407 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-memberlist") pod "speaker-2bbfm" (UID: "7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660") : secret "metallb-memberlist" not found
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.460613 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-metallb-excludel2\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.462876 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-metrics-certs\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.463102 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/cefdd4d8-632f-4912-b381-8d5df050aa30-metrics-certs\") pod \"controller-f8648f98b-wt5kv\" (UID: \"cefdd4d8-632f-4912-b381-8d5df050aa30\") " pod="metallb-system/controller-f8648f98b-wt5kv"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.465014 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/cefdd4d8-632f-4912-b381-8d5df050aa30-cert\") pod \"controller-f8648f98b-wt5kv\" (UID: \"cefdd4d8-632f-4912-b381-8d5df050aa30\") " pod="metallb-system/controller-f8648f98b-wt5kv"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.477066 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfwxf\" (UniqueName: \"kubernetes.io/projected/cefdd4d8-632f-4912-b381-8d5df050aa30-kube-api-access-vfwxf\") pod \"controller-f8648f98b-wt5kv\" (UID: \"cefdd4d8-632f-4912-b381-8d5df050aa30\") " pod="metallb-system/controller-f8648f98b-wt5kv"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.479491 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4nvp\" (UniqueName: \"kubernetes.io/projected/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-kube-api-access-m4nvp\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.540363 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-5gsrt"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.552708 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.658812 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-wt5kv"
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.965871 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-memberlist\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:19 crc kubenswrapper[4884]: E1210 00:50:19.966136 4884 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Dec 10 00:50:19 crc kubenswrapper[4884]: E1210 00:50:19.966330 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-memberlist podName:7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660 nodeName:}" failed. No retries permitted until 2025-12-10 00:50:20.966297066 +0000 UTC m=+1194.044254213 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-memberlist") pod "speaker-2bbfm" (UID: "7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660") : secret "metallb-memberlist" not found
Dec 10 00:50:19 crc kubenswrapper[4884]: I1210 00:50:19.982191 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf"]
Dec 10 00:50:19 crc kubenswrapper[4884]: W1210 00:50:19.983241 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode737cf7c_81d6_4aa0_b9c3_c3f2de596724.slice/crio-2b0d4a635b4598085809ea3614517600b7aa4d5ae1ebc2f6f722e4dd3bd61af6 WatchSource:0}: Error finding container 2b0d4a635b4598085809ea3614517600b7aa4d5ae1ebc2f6f722e4dd3bd61af6: Status 404 returned error can't find the container with id 2b0d4a635b4598085809ea3614517600b7aa4d5ae1ebc2f6f722e4dd3bd61af6
Dec 10 00:50:20 crc kubenswrapper[4884]: I1210 00:50:20.097734 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-wt5kv"]
Dec 10 00:50:20 crc kubenswrapper[4884]: I1210 00:50:20.152199 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf" event={"ID":"e737cf7c-81d6-4aa0-b9c3-c3f2de596724","Type":"ContainerStarted","Data":"2b0d4a635b4598085809ea3614517600b7aa4d5ae1ebc2f6f722e4dd3bd61af6"}
Dec 10 00:50:20 crc kubenswrapper[4884]: I1210 00:50:20.153801 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-wt5kv" event={"ID":"cefdd4d8-632f-4912-b381-8d5df050aa30","Type":"ContainerStarted","Data":"0ef6b2d2738172c28204c570ea042f60f5b13e0075bc2e5848cd03661fb0a244"}
Dec 10 00:50:20 crc kubenswrapper[4884]: I1210 00:50:20.154929 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5gsrt" event={"ID":"8da05654-9ed1-48fc-8d0f-b507abeda5f0","Type":"ContainerStarted","Data":"5bb5448e893d749644be40dedf048748ddeaae1cc794f8eb91b6b4ced843ee6e"}
Dec 10 00:50:20 crc kubenswrapper[4884]: I1210 00:50:20.985273 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-memberlist\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm"
Dec 10 00:50:20 crc kubenswrapper[4884]: I1210 00:50:20.991863 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume
\"memberlist\" (UniqueName: \"kubernetes.io/secret/7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660-memberlist\") pod \"speaker-2bbfm\" (UID: \"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660\") " pod="metallb-system/speaker-2bbfm" Dec 10 00:50:21 crc kubenswrapper[4884]: I1210 00:50:21.125403 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-2bbfm" Dec 10 00:50:21 crc kubenswrapper[4884]: W1210 00:50:21.159264 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e941ef6_a1e7_47b7_b7cb_eaaaaa2a8660.slice/crio-0f1d41a3f7c6455f3da2c74bdf173297e4fd9b7c7c2994eaf07fcb1d04bee08e WatchSource:0}: Error finding container 0f1d41a3f7c6455f3da2c74bdf173297e4fd9b7c7c2994eaf07fcb1d04bee08e: Status 404 returned error can't find the container with id 0f1d41a3f7c6455f3da2c74bdf173297e4fd9b7c7c2994eaf07fcb1d04bee08e Dec 10 00:50:21 crc kubenswrapper[4884]: I1210 00:50:21.172878 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-wt5kv" event={"ID":"cefdd4d8-632f-4912-b381-8d5df050aa30","Type":"ContainerStarted","Data":"f025afaf3fac6f442a5052e1879b22fee7a9143d9c72949b15b669533af3a0ef"} Dec 10 00:50:21 crc kubenswrapper[4884]: I1210 00:50:21.172939 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-wt5kv" event={"ID":"cefdd4d8-632f-4912-b381-8d5df050aa30","Type":"ContainerStarted","Data":"62ae2676aca33bcdf9b705934933e7bdf66c581a222dd2d3c937d037b157b8e3"} Dec 10 00:50:21 crc kubenswrapper[4884]: I1210 00:50:21.174111 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-wt5kv" Dec 10 00:50:21 crc kubenswrapper[4884]: I1210 00:50:21.190825 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-wt5kv" podStartSLOduration=2.190801145 podStartE2EDuration="2.190801145s" podCreationTimestamp="2025-12-10 00:50:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:50:21.185797042 +0000 UTC m=+1194.263754179" watchObservedRunningTime="2025-12-10 00:50:21.190801145 +0000 UTC m=+1194.268758262" Dec 10 00:50:22 crc kubenswrapper[4884]: I1210 00:50:22.185599 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2bbfm" event={"ID":"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660","Type":"ContainerStarted","Data":"415423276cf4318201a131fe600201ba85c7be2baa1ae90bafcb0c2f82d88e3f"} Dec 10 00:50:22 crc kubenswrapper[4884]: I1210 00:50:22.185853 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2bbfm" event={"ID":"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660","Type":"ContainerStarted","Data":"36b118f5d7a945064602dc03a4eb27a2dfead93a297e1740c8bac2bc9e090098"} Dec 10 00:50:22 crc kubenswrapper[4884]: I1210 00:50:22.185864 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2bbfm" event={"ID":"7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660","Type":"ContainerStarted","Data":"0f1d41a3f7c6455f3da2c74bdf173297e4fd9b7c7c2994eaf07fcb1d04bee08e"} Dec 10 00:50:22 crc kubenswrapper[4884]: I1210 00:50:22.186401 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-2bbfm" Dec 10 00:50:22 crc kubenswrapper[4884]: I1210 00:50:22.221162 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="metallb-system/speaker-2bbfm" podStartSLOduration=3.221147316 podStartE2EDuration="3.221147316s" podCreationTimestamp="2025-12-10 00:50:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:50:22.217856159 +0000 UTC m=+1195.295813296" watchObservedRunningTime="2025-12-10 00:50:22.221147316 +0000 UTC m=+1195.299104433" Dec 10 00:50:27 crc kubenswrapper[4884]: I1210 00:50:27.223267 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf" event={"ID":"e737cf7c-81d6-4aa0-b9c3-c3f2de596724","Type":"ContainerStarted","Data":"095c36d3a8c0f72e1e8a20a31588b82ae5f4dd3c3a18ce44da54e24182157d17"} Dec 10 00:50:27 crc kubenswrapper[4884]: I1210 00:50:27.224083 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf" Dec 10 00:50:27 crc kubenswrapper[4884]: I1210 00:50:27.227996 4884 generic.go:334] "Generic (PLEG): container finished" podID="8da05654-9ed1-48fc-8d0f-b507abeda5f0" containerID="d420c694006c0c50f6bdedf1a31be3584943c68653b1709b4228618cec98ec58" exitCode=0 Dec 10 00:50:27 crc kubenswrapper[4884]: I1210 00:50:27.228035 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5gsrt" event={"ID":"8da05654-9ed1-48fc-8d0f-b507abeda5f0","Type":"ContainerDied","Data":"d420c694006c0c50f6bdedf1a31be3584943c68653b1709b4228618cec98ec58"} Dec 10 00:50:27 crc kubenswrapper[4884]: I1210 00:50:27.241469 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf" podStartSLOduration=1.388470216 podStartE2EDuration="8.2414499s" podCreationTimestamp="2025-12-10 00:50:19 +0000 UTC" firstStartedPulling="2025-12-10 00:50:19.986474633 +0000 UTC m=+1193.064431780" lastFinishedPulling="2025-12-10 00:50:26.839454337 +0000 UTC m=+1199.917411464" observedRunningTime="2025-12-10 00:50:27.235921062 +0000 UTC m=+1200.313878179" watchObservedRunningTime="2025-12-10 00:50:27.2414499 +0000 UTC m=+1200.319407017" Dec 10 00:50:28 crc kubenswrapper[4884]: I1210 00:50:28.240640 4884 generic.go:334] "Generic (PLEG): container finished" podID="8da05654-9ed1-48fc-8d0f-b507abeda5f0" containerID="c2ac517e43748a06edf4e35db0a23c5f6830f1fcb23d05e9976af1947c9a113e" exitCode=0 Dec 10 00:50:28 crc kubenswrapper[4884]: I1210 00:50:28.241038 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5gsrt" event={"ID":"8da05654-9ed1-48fc-8d0f-b507abeda5f0","Type":"ContainerDied","Data":"c2ac517e43748a06edf4e35db0a23c5f6830f1fcb23d05e9976af1947c9a113e"} Dec 10 00:50:29 crc kubenswrapper[4884]: I1210 00:50:29.252007 4884 generic.go:334] "Generic (PLEG): container finished" podID="8da05654-9ed1-48fc-8d0f-b507abeda5f0" containerID="12aa15e1c47786b3ed1c9bffff39868532d84db39cf6f53f76ead28fbda8b5ad" exitCode=0 Dec 10 00:50:29 crc kubenswrapper[4884]: I1210 00:50:29.252063 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5gsrt" event={"ID":"8da05654-9ed1-48fc-8d0f-b507abeda5f0","Type":"ContainerDied","Data":"12aa15e1c47786b3ed1c9bffff39868532d84db39cf6f53f76ead28fbda8b5ad"} Dec 10 00:50:30 crc kubenswrapper[4884]: I1210 00:50:30.262252 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5gsrt" 
event={"ID":"8da05654-9ed1-48fc-8d0f-b507abeda5f0","Type":"ContainerStarted","Data":"50fa54cb5ed4419552937b7b2eb58da36db9f1352b1a3817a1b013348bff0487"} Dec 10 00:50:30 crc kubenswrapper[4884]: I1210 00:50:30.262618 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5gsrt" event={"ID":"8da05654-9ed1-48fc-8d0f-b507abeda5f0","Type":"ContainerStarted","Data":"aae9ec24eea3a00cfab73a47fa02c7ea52c711f9e77fb0396fe1f02fbb047e3e"} Dec 10 00:50:30 crc kubenswrapper[4884]: I1210 00:50:30.262634 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5gsrt" event={"ID":"8da05654-9ed1-48fc-8d0f-b507abeda5f0","Type":"ContainerStarted","Data":"dd7e2643c899bbe077b1bec48bea4869336446734f5e0c2d72ccd73c8692999d"} Dec 10 00:50:30 crc kubenswrapper[4884]: I1210 00:50:30.262646 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5gsrt" event={"ID":"8da05654-9ed1-48fc-8d0f-b507abeda5f0","Type":"ContainerStarted","Data":"3d0852a3179a96dbcfe837afa983101c3f3be1ab38497d52497b7233d7145b80"} Dec 10 00:50:30 crc kubenswrapper[4884]: I1210 00:50:30.262656 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5gsrt" event={"ID":"8da05654-9ed1-48fc-8d0f-b507abeda5f0","Type":"ContainerStarted","Data":"c0bd21bcab4de4af55880e9300a279c13c04d9517cb16df1fdbc85095a6ecaa4"} Dec 10 00:50:31 crc kubenswrapper[4884]: I1210 00:50:31.132949 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-2bbfm" Dec 10 00:50:31 crc kubenswrapper[4884]: I1210 00:50:31.279577 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-5gsrt" event={"ID":"8da05654-9ed1-48fc-8d0f-b507abeda5f0","Type":"ContainerStarted","Data":"0ae00df4109888e938d319d075d988b9fa6bcda466b01cecc5d83f97dc64a112"} Dec 10 00:50:31 crc kubenswrapper[4884]: I1210 00:50:31.279864 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-5gsrt" Dec 10 00:50:31 crc kubenswrapper[4884]: I1210 00:50:31.308761 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-5gsrt" podStartSLOduration=5.126903533 podStartE2EDuration="12.308740631s" podCreationTimestamp="2025-12-10 00:50:19 +0000 UTC" firstStartedPulling="2025-12-10 00:50:19.69110184 +0000 UTC m=+1192.769058967" lastFinishedPulling="2025-12-10 00:50:26.872938948 +0000 UTC m=+1199.950896065" observedRunningTime="2025-12-10 00:50:31.305791853 +0000 UTC m=+1204.383749010" watchObservedRunningTime="2025-12-10 00:50:31.308740631 +0000 UTC m=+1204.386697758" Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.142773 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-2nmf7"] Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.144498 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-2nmf7" Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.147189 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-n9twx" Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.147637 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.148257 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.183830 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-2nmf7"] Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.305622 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xhsk\" (UniqueName: \"kubernetes.io/projected/b07c57d2-f5b1-404c-aed9-15ab2339ff80-kube-api-access-5xhsk\") pod \"openstack-operator-index-2nmf7\" (UID: \"b07c57d2-f5b1-404c-aed9-15ab2339ff80\") " pod="openstack-operators/openstack-operator-index-2nmf7" Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.407471 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xhsk\" (UniqueName: \"kubernetes.io/projected/b07c57d2-f5b1-404c-aed9-15ab2339ff80-kube-api-access-5xhsk\") pod \"openstack-operator-index-2nmf7\" (UID: \"b07c57d2-f5b1-404c-aed9-15ab2339ff80\") " pod="openstack-operators/openstack-operator-index-2nmf7" Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.438474 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xhsk\" (UniqueName: \"kubernetes.io/projected/b07c57d2-f5b1-404c-aed9-15ab2339ff80-kube-api-access-5xhsk\") pod \"openstack-operator-index-2nmf7\" (UID: \"b07c57d2-f5b1-404c-aed9-15ab2339ff80\") " pod="openstack-operators/openstack-operator-index-2nmf7" Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.482164 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-2nmf7" Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.541554 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-5gsrt" Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.606753 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-5gsrt" Dec 10 00:50:34 crc kubenswrapper[4884]: I1210 00:50:34.988837 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-2nmf7"] Dec 10 00:50:34 crc kubenswrapper[4884]: W1210 00:50:34.995292 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb07c57d2_f5b1_404c_aed9_15ab2339ff80.slice/crio-980069e12d75e1092361186d2cdec9b22a550771a95ab434bc5dc18f6c767cbc WatchSource:0}: Error finding container 980069e12d75e1092361186d2cdec9b22a550771a95ab434bc5dc18f6c767cbc: Status 404 returned error can't find the container with id 980069e12d75e1092361186d2cdec9b22a550771a95ab434bc5dc18f6c767cbc Dec 10 00:50:35 crc kubenswrapper[4884]: I1210 00:50:35.312085 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-2nmf7" event={"ID":"b07c57d2-f5b1-404c-aed9-15ab2339ff80","Type":"ContainerStarted","Data":"980069e12d75e1092361186d2cdec9b22a550771a95ab434bc5dc18f6c767cbc"} Dec 10 00:50:37 crc kubenswrapper[4884]: I1210 00:50:37.511545 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-2nmf7"] Dec 10 00:50:38 crc kubenswrapper[4884]: I1210 00:50:38.122117 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-bpdr4"] Dec 10 00:50:38 crc kubenswrapper[4884]: I1210 00:50:38.123123 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-bpdr4" Dec 10 00:50:38 crc kubenswrapper[4884]: I1210 00:50:38.134258 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-bpdr4"] Dec 10 00:50:38 crc kubenswrapper[4884]: I1210 00:50:38.207801 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgdg7\" (UniqueName: \"kubernetes.io/projected/394f665f-f71e-4835-be68-15a3172a712e-kube-api-access-xgdg7\") pod \"openstack-operator-index-bpdr4\" (UID: \"394f665f-f71e-4835-be68-15a3172a712e\") " pod="openstack-operators/openstack-operator-index-bpdr4" Dec 10 00:50:38 crc kubenswrapper[4884]: I1210 00:50:38.309071 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgdg7\" (UniqueName: \"kubernetes.io/projected/394f665f-f71e-4835-be68-15a3172a712e-kube-api-access-xgdg7\") pod \"openstack-operator-index-bpdr4\" (UID: \"394f665f-f71e-4835-be68-15a3172a712e\") " pod="openstack-operators/openstack-operator-index-bpdr4" Dec 10 00:50:38 crc kubenswrapper[4884]: I1210 00:50:38.335820 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgdg7\" (UniqueName: \"kubernetes.io/projected/394f665f-f71e-4835-be68-15a3172a712e-kube-api-access-xgdg7\") pod \"openstack-operator-index-bpdr4\" (UID: \"394f665f-f71e-4835-be68-15a3172a712e\") " pod="openstack-operators/openstack-operator-index-bpdr4" Dec 10 00:50:38 crc kubenswrapper[4884]: I1210 00:50:38.445546 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-bpdr4" Dec 10 00:50:38 crc kubenswrapper[4884]: I1210 00:50:38.662043 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-bpdr4"] Dec 10 00:50:38 crc kubenswrapper[4884]: W1210 00:50:38.667644 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod394f665f_f71e_4835_be68_15a3172a712e.slice/crio-5b98d8538e02156f6a6b057effedd36f4d28f6130b76cb152b0c7a1652a98c17 WatchSource:0}: Error finding container 5b98d8538e02156f6a6b057effedd36f4d28f6130b76cb152b0c7a1652a98c17: Status 404 returned error can't find the container with id 5b98d8538e02156f6a6b057effedd36f4d28f6130b76cb152b0c7a1652a98c17 Dec 10 00:50:39 crc kubenswrapper[4884]: I1210 00:50:39.342460 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-2nmf7" event={"ID":"b07c57d2-f5b1-404c-aed9-15ab2339ff80","Type":"ContainerStarted","Data":"a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e"} Dec 10 00:50:39 crc kubenswrapper[4884]: I1210 00:50:39.342636 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-2nmf7" podUID="b07c57d2-f5b1-404c-aed9-15ab2339ff80" containerName="registry-server" containerID="cri-o://a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e" gracePeriod=2 Dec 10 00:50:39 crc kubenswrapper[4884]: I1210 00:50:39.345151 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-bpdr4" event={"ID":"394f665f-f71e-4835-be68-15a3172a712e","Type":"ContainerStarted","Data":"e4dd9b7598a1a71109c8daee9f599f35b175d3f9732e17052b521339b112e395"} Dec 10 00:50:39 crc kubenswrapper[4884]: I1210 00:50:39.345235 4884 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack-operators/openstack-operator-index-bpdr4" event={"ID":"394f665f-f71e-4835-be68-15a3172a712e","Type":"ContainerStarted","Data":"5b98d8538e02156f6a6b057effedd36f4d28f6130b76cb152b0c7a1652a98c17"} Dec 10 00:50:39 crc kubenswrapper[4884]: I1210 00:50:39.364324 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-2nmf7" podStartSLOduration=2.159088659 podStartE2EDuration="5.364299652s" podCreationTimestamp="2025-12-10 00:50:34 +0000 UTC" firstStartedPulling="2025-12-10 00:50:34.997511895 +0000 UTC m=+1208.075469012" lastFinishedPulling="2025-12-10 00:50:38.202722878 +0000 UTC m=+1211.280680005" observedRunningTime="2025-12-10 00:50:39.360935312 +0000 UTC m=+1212.438892439" watchObservedRunningTime="2025-12-10 00:50:39.364299652 +0000 UTC m=+1212.442256779" Dec 10 00:50:39 crc kubenswrapper[4884]: I1210 00:50:39.381394 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-bpdr4" podStartSLOduration=1.312358669 podStartE2EDuration="1.381377436s" podCreationTimestamp="2025-12-10 00:50:38 +0000 UTC" firstStartedPulling="2025-12-10 00:50:38.673298216 +0000 UTC m=+1211.751255333" lastFinishedPulling="2025-12-10 00:50:38.742316973 +0000 UTC m=+1211.820274100" observedRunningTime="2025-12-10 00:50:39.378850769 +0000 UTC m=+1212.456807936" watchObservedRunningTime="2025-12-10 00:50:39.381377436 +0000 UTC m=+1212.459334563" Dec 10 00:50:39 crc kubenswrapper[4884]: I1210 00:50:39.544111 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-5gsrt" Dec 10 00:50:39 crc kubenswrapper[4884]: I1210 00:50:39.564687 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-tvnsf" Dec 10 00:50:39 crc kubenswrapper[4884]: I1210 00:50:39.668001 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-wt5kv" Dec 10 00:50:39 crc kubenswrapper[4884]: I1210 00:50:39.785763 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-2nmf7" Dec 10 00:50:39 crc kubenswrapper[4884]: I1210 00:50:39.930003 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xhsk\" (UniqueName: \"kubernetes.io/projected/b07c57d2-f5b1-404c-aed9-15ab2339ff80-kube-api-access-5xhsk\") pod \"b07c57d2-f5b1-404c-aed9-15ab2339ff80\" (UID: \"b07c57d2-f5b1-404c-aed9-15ab2339ff80\") " Dec 10 00:50:39 crc kubenswrapper[4884]: I1210 00:50:39.939793 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b07c57d2-f5b1-404c-aed9-15ab2339ff80-kube-api-access-5xhsk" (OuterVolumeSpecName: "kube-api-access-5xhsk") pod "b07c57d2-f5b1-404c-aed9-15ab2339ff80" (UID: "b07c57d2-f5b1-404c-aed9-15ab2339ff80"). InnerVolumeSpecName "kube-api-access-5xhsk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:50:40 crc kubenswrapper[4884]: I1210 00:50:40.033032 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xhsk\" (UniqueName: \"kubernetes.io/projected/b07c57d2-f5b1-404c-aed9-15ab2339ff80-kube-api-access-5xhsk\") on node \"crc\" DevicePath \"\"" Dec 10 00:50:40 crc kubenswrapper[4884]: I1210 00:50:40.358980 4884 generic.go:334] "Generic (PLEG): container finished" podID="b07c57d2-f5b1-404c-aed9-15ab2339ff80" containerID="a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e" exitCode=0 Dec 10 00:50:40 crc kubenswrapper[4884]: I1210 00:50:40.359074 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-2nmf7" Dec 10 00:50:40 crc kubenswrapper[4884]: I1210 00:50:40.359107 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-2nmf7" event={"ID":"b07c57d2-f5b1-404c-aed9-15ab2339ff80","Type":"ContainerDied","Data":"a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e"} Dec 10 00:50:40 crc kubenswrapper[4884]: I1210 00:50:40.360463 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-2nmf7" event={"ID":"b07c57d2-f5b1-404c-aed9-15ab2339ff80","Type":"ContainerDied","Data":"980069e12d75e1092361186d2cdec9b22a550771a95ab434bc5dc18f6c767cbc"} Dec 10 00:50:40 crc kubenswrapper[4884]: I1210 00:50:40.360509 4884 scope.go:117] "RemoveContainer" containerID="a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e" Dec 10 00:50:40 crc kubenswrapper[4884]: I1210 00:50:40.390590 4884 scope.go:117] "RemoveContainer" containerID="a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e" Dec 10 00:50:40 crc kubenswrapper[4884]: E1210 00:50:40.391339 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e\": container with ID starting with a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e not found: ID does not exist" containerID="a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e" Dec 10 00:50:40 crc kubenswrapper[4884]: I1210 00:50:40.391403 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e"} err="failed to get container status \"a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e\": rpc error: code = NotFound desc = could not find container \"a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e\": container with ID starting with a92370c27ef99b2e717fcbb59787113ffa9168784f8e1bfc08ecab63796df54e not found: ID does not exist" Dec 10 00:50:40 crc kubenswrapper[4884]: I1210 00:50:40.401568 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-2nmf7"] Dec 10 00:50:40 crc kubenswrapper[4884]: I1210 00:50:40.406942 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-2nmf7"] Dec 10 00:50:41 crc kubenswrapper[4884]: I1210 00:50:41.298677 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b07c57d2-f5b1-404c-aed9-15ab2339ff80" path="/var/lib/kubelet/pods/b07c57d2-f5b1-404c-aed9-15ab2339ff80/volumes" Dec 10 00:50:48 crc kubenswrapper[4884]: I1210 00:50:48.446746 4884 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-bpdr4" Dec 10 00:50:48 crc kubenswrapper[4884]: I1210 00:50:48.447387 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-bpdr4" Dec 10 00:50:48 crc kubenswrapper[4884]: I1210 00:50:48.489514 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-bpdr4" Dec 10 00:50:49 crc kubenswrapper[4884]: I1210 00:50:49.473743 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-bpdr4" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.163309 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5"] Dec 10 00:50:55 crc kubenswrapper[4884]: E1210 00:50:55.165570 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b07c57d2-f5b1-404c-aed9-15ab2339ff80" containerName="registry-server" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.165666 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b07c57d2-f5b1-404c-aed9-15ab2339ff80" containerName="registry-server" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.165948 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b07c57d2-f5b1-404c-aed9-15ab2339ff80" containerName="registry-server" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.167226 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.171102 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-2dr52" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.174034 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5"] Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.284488 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rwtx\" (UniqueName: \"kubernetes.io/projected/c5236b27-f7c5-4be5-9cab-a964b2206b70-kube-api-access-5rwtx\") pod \"89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5\" (UID: \"c5236b27-f7c5-4be5-9cab-a964b2206b70\") " pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.284791 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c5236b27-f7c5-4be5-9cab-a964b2206b70-bundle\") pod \"89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5\" (UID: \"c5236b27-f7c5-4be5-9cab-a964b2206b70\") " pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.284955 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c5236b27-f7c5-4be5-9cab-a964b2206b70-util\") pod \"89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5\" (UID: \"c5236b27-f7c5-4be5-9cab-a964b2206b70\") " pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" 
Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.387130 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rwtx\" (UniqueName: \"kubernetes.io/projected/c5236b27-f7c5-4be5-9cab-a964b2206b70-kube-api-access-5rwtx\") pod \"89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5\" (UID: \"c5236b27-f7c5-4be5-9cab-a964b2206b70\") " pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.387669 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c5236b27-f7c5-4be5-9cab-a964b2206b70-bundle\") pod \"89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5\" (UID: \"c5236b27-f7c5-4be5-9cab-a964b2206b70\") " pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.388315 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c5236b27-f7c5-4be5-9cab-a964b2206b70-bundle\") pod \"89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5\" (UID: \"c5236b27-f7c5-4be5-9cab-a964b2206b70\") " pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.387925 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c5236b27-f7c5-4be5-9cab-a964b2206b70-util\") pod \"89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5\" (UID: \"c5236b27-f7c5-4be5-9cab-a964b2206b70\") " pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.388756 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c5236b27-f7c5-4be5-9cab-a964b2206b70-util\") pod \"89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5\" (UID: \"c5236b27-f7c5-4be5-9cab-a964b2206b70\") " pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.415217 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rwtx\" (UniqueName: \"kubernetes.io/projected/c5236b27-f7c5-4be5-9cab-a964b2206b70-kube-api-access-5rwtx\") pod \"89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5\" (UID: \"c5236b27-f7c5-4be5-9cab-a964b2206b70\") " pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.490243 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" Dec 10 00:50:55 crc kubenswrapper[4884]: I1210 00:50:55.942804 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5"] Dec 10 00:50:56 crc kubenswrapper[4884]: I1210 00:50:56.502389 4884 generic.go:334] "Generic (PLEG): container finished" podID="c5236b27-f7c5-4be5-9cab-a964b2206b70" containerID="ced8177ef2ff6fcfb85a57891043bb7dc2bb8fbf771624f15b93df0cb6079e86" exitCode=0 Dec 10 00:50:56 crc kubenswrapper[4884]: I1210 00:50:56.502473 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" event={"ID":"c5236b27-f7c5-4be5-9cab-a964b2206b70","Type":"ContainerDied","Data":"ced8177ef2ff6fcfb85a57891043bb7dc2bb8fbf771624f15b93df0cb6079e86"} Dec 10 00:50:56 crc kubenswrapper[4884]: I1210 00:50:56.502814 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" event={"ID":"c5236b27-f7c5-4be5-9cab-a964b2206b70","Type":"ContainerStarted","Data":"2b79ce6fb1bd5def3d780211354216c0d4a8c30f1eb35545f999361796bb6a69"} Dec 10 00:50:57 crc kubenswrapper[4884]: I1210 00:50:57.510911 4884 generic.go:334] "Generic (PLEG): container finished" podID="c5236b27-f7c5-4be5-9cab-a964b2206b70" containerID="99b8782ea0f1b4e290fdee6771b5f27fb6a6b8766ddcce4e38f9218b39b8f50e" exitCode=0 Dec 10 00:50:57 crc kubenswrapper[4884]: I1210 00:50:57.510962 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" event={"ID":"c5236b27-f7c5-4be5-9cab-a964b2206b70","Type":"ContainerDied","Data":"99b8782ea0f1b4e290fdee6771b5f27fb6a6b8766ddcce4e38f9218b39b8f50e"} Dec 10 00:50:58 crc kubenswrapper[4884]: I1210 00:50:58.521469 4884 generic.go:334] "Generic (PLEG): container finished" podID="c5236b27-f7c5-4be5-9cab-a964b2206b70" containerID="a4889484321cfab154529e901fd561e221fdb708e01fe830ff7d27ac629e7a12" exitCode=0 Dec 10 00:50:58 crc kubenswrapper[4884]: I1210 00:50:58.521546 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" event={"ID":"c5236b27-f7c5-4be5-9cab-a964b2206b70","Type":"ContainerDied","Data":"a4889484321cfab154529e901fd561e221fdb708e01fe830ff7d27ac629e7a12"} Dec 10 00:50:59 crc kubenswrapper[4884]: I1210 00:50:59.852106 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" Dec 10 00:50:59 crc kubenswrapper[4884]: I1210 00:50:59.963174 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c5236b27-f7c5-4be5-9cab-a964b2206b70-util\") pod \"c5236b27-f7c5-4be5-9cab-a964b2206b70\" (UID: \"c5236b27-f7c5-4be5-9cab-a964b2206b70\") " Dec 10 00:50:59 crc kubenswrapper[4884]: I1210 00:50:59.963285 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c5236b27-f7c5-4be5-9cab-a964b2206b70-bundle\") pod \"c5236b27-f7c5-4be5-9cab-a964b2206b70\" (UID: \"c5236b27-f7c5-4be5-9cab-a964b2206b70\") " Dec 10 00:50:59 crc kubenswrapper[4884]: I1210 00:50:59.963315 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rwtx\" (UniqueName: \"kubernetes.io/projected/c5236b27-f7c5-4be5-9cab-a964b2206b70-kube-api-access-5rwtx\") pod \"c5236b27-f7c5-4be5-9cab-a964b2206b70\" (UID: \"c5236b27-f7c5-4be5-9cab-a964b2206b70\") " Dec 10 00:50:59 crc kubenswrapper[4884]: I1210 00:50:59.964176 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5236b27-f7c5-4be5-9cab-a964b2206b70-bundle" (OuterVolumeSpecName: "bundle") pod "c5236b27-f7c5-4be5-9cab-a964b2206b70" (UID: "c5236b27-f7c5-4be5-9cab-a964b2206b70"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:50:59 crc kubenswrapper[4884]: I1210 00:50:59.970973 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5236b27-f7c5-4be5-9cab-a964b2206b70-kube-api-access-5rwtx" (OuterVolumeSpecName: "kube-api-access-5rwtx") pod "c5236b27-f7c5-4be5-9cab-a964b2206b70" (UID: "c5236b27-f7c5-4be5-9cab-a964b2206b70"). InnerVolumeSpecName "kube-api-access-5rwtx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:50:59 crc kubenswrapper[4884]: I1210 00:50:59.988801 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5236b27-f7c5-4be5-9cab-a964b2206b70-util" (OuterVolumeSpecName: "util") pod "c5236b27-f7c5-4be5-9cab-a964b2206b70" (UID: "c5236b27-f7c5-4be5-9cab-a964b2206b70"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:51:00 crc kubenswrapper[4884]: I1210 00:51:00.065657 4884 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/c5236b27-f7c5-4be5-9cab-a964b2206b70-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:51:00 crc kubenswrapper[4884]: I1210 00:51:00.065688 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rwtx\" (UniqueName: \"kubernetes.io/projected/c5236b27-f7c5-4be5-9cab-a964b2206b70-kube-api-access-5rwtx\") on node \"crc\" DevicePath \"\"" Dec 10 00:51:00 crc kubenswrapper[4884]: I1210 00:51:00.065701 4884 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/c5236b27-f7c5-4be5-9cab-a964b2206b70-util\") on node \"crc\" DevicePath \"\"" Dec 10 00:51:00 crc kubenswrapper[4884]: I1210 00:51:00.540548 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" event={"ID":"c5236b27-f7c5-4be5-9cab-a964b2206b70","Type":"ContainerDied","Data":"2b79ce6fb1bd5def3d780211354216c0d4a8c30f1eb35545f999361796bb6a69"} Dec 10 00:51:00 crc kubenswrapper[4884]: I1210 00:51:00.540585 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b79ce6fb1bd5def3d780211354216c0d4a8c30f1eb35545f999361796bb6a69" Dec 10 00:51:00 crc kubenswrapper[4884]: I1210 00:51:00.540617 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5" Dec 10 00:51:07 crc kubenswrapper[4884]: I1210 00:51:07.275587 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f"] Dec 10 00:51:07 crc kubenswrapper[4884]: E1210 00:51:07.276398 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5236b27-f7c5-4be5-9cab-a964b2206b70" containerName="util" Dec 10 00:51:07 crc kubenswrapper[4884]: I1210 00:51:07.276413 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5236b27-f7c5-4be5-9cab-a964b2206b70" containerName="util" Dec 10 00:51:07 crc kubenswrapper[4884]: E1210 00:51:07.276451 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5236b27-f7c5-4be5-9cab-a964b2206b70" containerName="pull" Dec 10 00:51:07 crc kubenswrapper[4884]: I1210 00:51:07.276460 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5236b27-f7c5-4be5-9cab-a964b2206b70" containerName="pull" Dec 10 00:51:07 crc kubenswrapper[4884]: E1210 00:51:07.276480 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5236b27-f7c5-4be5-9cab-a964b2206b70" containerName="extract" Dec 10 00:51:07 crc kubenswrapper[4884]: I1210 00:51:07.276489 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5236b27-f7c5-4be5-9cab-a964b2206b70" containerName="extract" Dec 10 00:51:07 crc kubenswrapper[4884]: I1210 00:51:07.276603 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5236b27-f7c5-4be5-9cab-a964b2206b70" containerName="extract" Dec 10 00:51:07 crc kubenswrapper[4884]: I1210 00:51:07.277117 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f" Dec 10 00:51:07 crc kubenswrapper[4884]: W1210 00:51:07.279587 4884 reflector.go:561] object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-zxcp6": failed to list *v1.Secret: secrets "openstack-operator-controller-operator-dockercfg-zxcp6" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack-operators": no relationship found between node 'crc' and this object Dec 10 00:51:07 crc kubenswrapper[4884]: E1210 00:51:07.279634 4884 reflector.go:158] "Unhandled Error" err="object-\"openstack-operators\"/\"openstack-operator-controller-operator-dockercfg-zxcp6\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openstack-operator-controller-operator-dockercfg-zxcp6\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack-operators\": no relationship found between node 'crc' and this object" logger="UnhandledError" Dec 10 00:51:07 crc kubenswrapper[4884]: I1210 00:51:07.334637 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f"] Dec 10 00:51:07 crc kubenswrapper[4884]: I1210 00:51:07.377568 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvj2q\" (UniqueName: \"kubernetes.io/projected/c1bdca51-e4b7-449f-a087-1247a1649701-kube-api-access-jvj2q\") pod \"openstack-operator-controller-operator-7cccdd5794-k969f\" (UID: \"c1bdca51-e4b7-449f-a087-1247a1649701\") " pod="openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f" Dec 10 00:51:07 crc kubenswrapper[4884]: I1210 00:51:07.478840 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvj2q\" (UniqueName: \"kubernetes.io/projected/c1bdca51-e4b7-449f-a087-1247a1649701-kube-api-access-jvj2q\") pod \"openstack-operator-controller-operator-7cccdd5794-k969f\" (UID: \"c1bdca51-e4b7-449f-a087-1247a1649701\") " pod="openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f" Dec 10 00:51:07 crc kubenswrapper[4884]: I1210 00:51:07.497953 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvj2q\" (UniqueName: \"kubernetes.io/projected/c1bdca51-e4b7-449f-a087-1247a1649701-kube-api-access-jvj2q\") pod \"openstack-operator-controller-operator-7cccdd5794-k969f\" (UID: \"c1bdca51-e4b7-449f-a087-1247a1649701\") " pod="openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f" Dec 10 00:51:08 crc kubenswrapper[4884]: I1210 00:51:08.448183 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-zxcp6" Dec 10 00:51:08 crc kubenswrapper[4884]: I1210 00:51:08.457376 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f" Dec 10 00:51:08 crc kubenswrapper[4884]: I1210 00:51:08.942151 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f"] Dec 10 00:51:09 crc kubenswrapper[4884]: I1210 00:51:09.612882 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f" event={"ID":"c1bdca51-e4b7-449f-a087-1247a1649701","Type":"ContainerStarted","Data":"715855b158359eaac1c58fc6c19472988518912d9b403c401469aade1306052a"} Dec 10 00:51:13 crc kubenswrapper[4884]: I1210 00:51:13.657246 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f" event={"ID":"c1bdca51-e4b7-449f-a087-1247a1649701","Type":"ContainerStarted","Data":"64762d538d074f1f2ca7c2c94e0b60c1bb6ab7ce0d8fc2bbeba770872e6a5c97"} Dec 10 00:51:13 crc kubenswrapper[4884]: I1210 00:51:13.657838 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f" Dec 10 00:51:13 crc kubenswrapper[4884]: I1210 00:51:13.691379 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f" podStartSLOduration=2.529938456 podStartE2EDuration="6.691365203s" podCreationTimestamp="2025-12-10 00:51:07 +0000 UTC" firstStartedPulling="2025-12-10 00:51:08.956958131 +0000 UTC m=+1242.034915258" lastFinishedPulling="2025-12-10 00:51:13.118384888 +0000 UTC m=+1246.196342005" observedRunningTime="2025-12-10 00:51:13.687570222 +0000 UTC m=+1246.765527359" watchObservedRunningTime="2025-12-10 00:51:13.691365203 +0000 UTC m=+1246.769322320" Dec 10 00:51:18 crc kubenswrapper[4884]: I1210 00:51:18.460408 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7cccdd5794-k969f" Dec 10 00:51:48 crc kubenswrapper[4884]: I1210 00:51:48.098503 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:51:48 crc kubenswrapper[4884]: I1210 00:51:48.099017 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.453011 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.454885 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.458868 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-fsqnd" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.461909 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.463047 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.465380 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-768vk" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.468841 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.469933 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.477104 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-q8g6n" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.487189 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.505747 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.543716 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.587476 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.588954 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.593029 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-hwml8" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.603454 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.604726 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.607824 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-pqxdk" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.610594 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8thz5\" (UniqueName: \"kubernetes.io/projected/c88f7e0d-d880-42ce-96fd-a1d1ec7be33f-kube-api-access-8thz5\") pod \"barbican-operator-controller-manager-7d9dfd778-w6tvz\" (UID: \"c88f7e0d-d880-42ce-96fd-a1d1ec7be33f\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.610699 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wb96s\" (UniqueName: \"kubernetes.io/projected/c355d3e8-66ee-46b5-8979-d94efb631d6a-kube-api-access-wb96s\") pod \"cinder-operator-controller-manager-6c677c69b-twll8\" (UID: \"c355d3e8-66ee-46b5-8979-d94efb631d6a\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.610737 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8ltt\" (UniqueName: \"kubernetes.io/projected/6953ae1f-46db-410e-b79a-7eff9b687850-kube-api-access-h8ltt\") pod \"designate-operator-controller-manager-697fb699cf-gdkh6\" (UID: \"6953ae1f-46db-410e-b79a-7eff9b687850\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.621444 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.633048 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.648610 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.649725 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.655735 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.656937 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.664399 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.665661 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.665806 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-tprr4" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.666092 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-br5v9" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.675196 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.687792 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.688018 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.690724 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-qhhk8" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.695762 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.697089 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.701046 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-gc786" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.706070 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.711489 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wb96s\" (UniqueName: \"kubernetes.io/projected/c355d3e8-66ee-46b5-8979-d94efb631d6a-kube-api-access-wb96s\") pod \"cinder-operator-controller-manager-6c677c69b-twll8\" (UID: \"c355d3e8-66ee-46b5-8979-d94efb631d6a\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.711542 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8ltt\" (UniqueName: \"kubernetes.io/projected/6953ae1f-46db-410e-b79a-7eff9b687850-kube-api-access-h8ltt\") pod \"designate-operator-controller-manager-697fb699cf-gdkh6\" (UID: \"6953ae1f-46db-410e-b79a-7eff9b687850\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.711587 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7txc9\" (UniqueName: \"kubernetes.io/projected/203fedfb-ba93-4cb0-afd5-a01607b4f40d-kube-api-access-7txc9\") pod \"heat-operator-controller-manager-5f64f6f8bb-w9xfr\" (UID: \"203fedfb-ba93-4cb0-afd5-a01607b4f40d\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.711609 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dg2hg\" (UniqueName: \"kubernetes.io/projected/c74b9b64-5f7c-462a-85e9-a7eacaf2824e-kube-api-access-dg2hg\") pod \"glance-operator-controller-manager-5697bb5779-84wnx\" (UID: \"c74b9b64-5f7c-462a-85e9-a7eacaf2824e\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.711628 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8thz5\" (UniqueName: \"kubernetes.io/projected/c88f7e0d-d880-42ce-96fd-a1d1ec7be33f-kube-api-access-8thz5\") pod \"barbican-operator-controller-manager-7d9dfd778-w6tvz\" (UID: \"c88f7e0d-d880-42ce-96fd-a1d1ec7be33f\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.738501 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.743726 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.745637 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.748184 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-rl2l8" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.760350 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8ltt\" (UniqueName: \"kubernetes.io/projected/6953ae1f-46db-410e-b79a-7eff9b687850-kube-api-access-h8ltt\") pod \"designate-operator-controller-manager-697fb699cf-gdkh6\" (UID: \"6953ae1f-46db-410e-b79a-7eff9b687850\") " pod="openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.763759 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wb96s\" (UniqueName: \"kubernetes.io/projected/c355d3e8-66ee-46b5-8979-d94efb631d6a-kube-api-access-wb96s\") pod \"cinder-operator-controller-manager-6c677c69b-twll8\" (UID: \"c355d3e8-66ee-46b5-8979-d94efb631d6a\") " pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.778732 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8thz5\" (UniqueName: \"kubernetes.io/projected/c88f7e0d-d880-42ce-96fd-a1d1ec7be33f-kube-api-access-8thz5\") pod \"barbican-operator-controller-manager-7d9dfd778-w6tvz\" (UID: \"c88f7e0d-d880-42ce-96fd-a1d1ec7be33f\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.781164 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.782370 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.784108 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-7vqhc" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.790509 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.797746 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.801827 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.803046 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.814146 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-lcx7w" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.814743 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.815043 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.815344 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvt2r\" (UniqueName: \"kubernetes.io/projected/53fab550-e4ee-4601-bc21-e93b60ee3788-kube-api-access-kvt2r\") pod \"ironic-operator-controller-manager-967d97867-5zl6k\" (UID: \"53fab550-e4ee-4601-bc21-e93b60ee3788\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.815377 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5st2\" (UniqueName: \"kubernetes.io/projected/4206f75a-b9be-4d84-806b-0dea3aab1823-kube-api-access-g5st2\") pod \"infra-operator-controller-manager-78d48bff9d-gxgfp\" (UID: \"4206f75a-b9be-4d84-806b-0dea3aab1823\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.815408 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert\") pod \"infra-operator-controller-manager-78d48bff9d-gxgfp\" (UID: \"4206f75a-b9be-4d84-806b-0dea3aab1823\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.815449 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7txc9\" (UniqueName: \"kubernetes.io/projected/203fedfb-ba93-4cb0-afd5-a01607b4f40d-kube-api-access-7txc9\") pod \"heat-operator-controller-manager-5f64f6f8bb-w9xfr\" (UID: \"203fedfb-ba93-4cb0-afd5-a01607b4f40d\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.815470 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dg2hg\" (UniqueName: \"kubernetes.io/projected/c74b9b64-5f7c-462a-85e9-a7eacaf2824e-kube-api-access-dg2hg\") pod \"glance-operator-controller-manager-5697bb5779-84wnx\" (UID: \"c74b9b64-5f7c-462a-85e9-a7eacaf2824e\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.815922 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwm46\" (UniqueName: \"kubernetes.io/projected/e9ad1d55-9cdf-42bd-89c4-47a4ad0150db-kube-api-access-qwm46\") pod \"keystone-operator-controller-manager-7765d96ddf-t95cb\" (UID: \"e9ad1d55-9cdf-42bd-89c4-47a4ad0150db\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb" Dec 10 00:51:56 crc 
kubenswrapper[4884]: I1210 00:51:56.815986 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f25rl\" (UniqueName: \"kubernetes.io/projected/4e506cfc-ad57-47e4-91ff-c4779cec4258-kube-api-access-f25rl\") pod \"horizon-operator-controller-manager-68c6d99b8f-wwrh9\" (UID: \"4e506cfc-ad57-47e4-91ff-c4779cec4258\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.831641 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.838702 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dg2hg\" (UniqueName: \"kubernetes.io/projected/c74b9b64-5f7c-462a-85e9-a7eacaf2824e-kube-api-access-dg2hg\") pod \"glance-operator-controller-manager-5697bb5779-84wnx\" (UID: \"c74b9b64-5f7c-462a-85e9-a7eacaf2824e\") " pod="openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.842146 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-77grd"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.843133 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7txc9\" (UniqueName: \"kubernetes.io/projected/203fedfb-ba93-4cb0-afd5-a01607b4f40d-kube-api-access-7txc9\") pod \"heat-operator-controller-manager-5f64f6f8bb-w9xfr\" (UID: \"203fedfb-ba93-4cb0-afd5-a01607b4f40d\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.843577 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.843649 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-77grd" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.845692 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-qj52j" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.864403 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.865625 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.869254 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-hlnbc" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.874140 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-77grd"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.894512 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.916724 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.919958 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgknk\" (UniqueName: \"kubernetes.io/projected/4309c558-54b8-4034-85db-c4ca159600ad-kube-api-access-fgknk\") pod \"manila-operator-controller-manager-5b5fd79c9c-j554b\" (UID: \"4309c558-54b8-4034-85db-c4ca159600ad\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.920022 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvt2r\" (UniqueName: \"kubernetes.io/projected/53fab550-e4ee-4601-bc21-e93b60ee3788-kube-api-access-kvt2r\") pod \"ironic-operator-controller-manager-967d97867-5zl6k\" (UID: \"53fab550-e4ee-4601-bc21-e93b60ee3788\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.920047 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5st2\" (UniqueName: \"kubernetes.io/projected/4206f75a-b9be-4d84-806b-0dea3aab1823-kube-api-access-g5st2\") pod \"infra-operator-controller-manager-78d48bff9d-gxgfp\" (UID: \"4206f75a-b9be-4d84-806b-0dea3aab1823\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.920111 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert\") pod \"infra-operator-controller-manager-78d48bff9d-gxgfp\" (UID: \"4206f75a-b9be-4d84-806b-0dea3aab1823\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.920175 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5t4d\" (UniqueName: \"kubernetes.io/projected/9fd7b8b4-5bdd-4d3c-859e-78ba9cced0b6-kube-api-access-w5t4d\") pod \"mariadb-operator-controller-manager-79c8c4686c-gb69x\" (UID: \"9fd7b8b4-5bdd-4d3c-859e-78ba9cced0b6\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.920263 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj6p4\" (UniqueName: \"kubernetes.io/projected/07197f67-1348-456f-a655-d3a418542e85-kube-api-access-sj6p4\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-4f28g\" (UID: \"07197f67-1348-456f-a655-d3a418542e85\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.920289 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwm46\" (UniqueName: \"kubernetes.io/projected/e9ad1d55-9cdf-42bd-89c4-47a4ad0150db-kube-api-access-qwm46\") pod \"keystone-operator-controller-manager-7765d96ddf-t95cb\" (UID: \"e9ad1d55-9cdf-42bd-89c4-47a4ad0150db\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.920371 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f25rl\" (UniqueName: 
\"kubernetes.io/projected/4e506cfc-ad57-47e4-91ff-c4779cec4258-kube-api-access-f25rl\") pod \"horizon-operator-controller-manager-68c6d99b8f-wwrh9\" (UID: \"4e506cfc-ad57-47e4-91ff-c4779cec4258\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9" Dec 10 00:51:56 crc kubenswrapper[4884]: E1210 00:51:56.920870 4884 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 00:51:56 crc kubenswrapper[4884]: E1210 00:51:56.920924 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert podName:4206f75a-b9be-4d84-806b-0dea3aab1823 nodeName:}" failed. No retries permitted until 2025-12-10 00:51:57.42090728 +0000 UTC m=+1290.498864397 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert") pod "infra-operator-controller-manager-78d48bff9d-gxgfp" (UID: "4206f75a-b9be-4d84-806b-0dea3aab1823") : secret "infra-operator-webhook-server-cert" not found Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.942041 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwm46\" (UniqueName: \"kubernetes.io/projected/e9ad1d55-9cdf-42bd-89c4-47a4ad0150db-kube-api-access-qwm46\") pod \"keystone-operator-controller-manager-7765d96ddf-t95cb\" (UID: \"e9ad1d55-9cdf-42bd-89c4-47a4ad0150db\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.947512 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.947599 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f25rl\" (UniqueName: \"kubernetes.io/projected/4e506cfc-ad57-47e4-91ff-c4779cec4258-kube-api-access-f25rl\") pod \"horizon-operator-controller-manager-68c6d99b8f-wwrh9\" (UID: \"4e506cfc-ad57-47e4-91ff-c4779cec4258\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.948127 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.949821 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5st2\" (UniqueName: \"kubernetes.io/projected/4206f75a-b9be-4d84-806b-0dea3aab1823-kube-api-access-g5st2\") pod \"infra-operator-controller-manager-78d48bff9d-gxgfp\" (UID: \"4206f75a-b9be-4d84-806b-0dea3aab1823\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.953244 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvt2r\" (UniqueName: \"kubernetes.io/projected/53fab550-e4ee-4601-bc21-e93b60ee3788-kube-api-access-kvt2r\") pod \"ironic-operator-controller-manager-967d97867-5zl6k\" (UID: \"53fab550-e4ee-4601-bc21-e93b60ee3788\") " pod="openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.956197 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.961042 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-xkdrg" Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.984131 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg"] Dec 10 00:51:56 crc kubenswrapper[4884]: I1210 00:51:56.996342 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.048923 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.083990 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgknk\" (UniqueName: \"kubernetes.io/projected/4309c558-54b8-4034-85db-c4ca159600ad-kube-api-access-fgknk\") pod \"manila-operator-controller-manager-5b5fd79c9c-j554b\" (UID: \"4309c558-54b8-4034-85db-c4ca159600ad\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.084051 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hw9t2\" (UniqueName: \"kubernetes.io/projected/c818d687-5ceb-4124-9ca3-be82851aa092-kube-api-access-hw9t2\") pod \"nova-operator-controller-manager-697bc559fc-77grd\" (UID: \"c818d687-5ceb-4124-9ca3-be82851aa092\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-77grd" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.084148 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5t4d\" (UniqueName: \"kubernetes.io/projected/9fd7b8b4-5bdd-4d3c-859e-78ba9cced0b6-kube-api-access-w5t4d\") pod \"mariadb-operator-controller-manager-79c8c4686c-gb69x\" (UID: \"9fd7b8b4-5bdd-4d3c-859e-78ba9cced0b6\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.084181 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmcwv\" (UniqueName: \"kubernetes.io/projected/9295a029-0033-437d-a315-0549c7dc31aa-kube-api-access-lmcwv\") pod \"octavia-operator-controller-manager-998648c74-s5dkz\" (UID: \"9295a029-0033-437d-a315-0549c7dc31aa\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.084243 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sj6p4\" (UniqueName: \"kubernetes.io/projected/07197f67-1348-456f-a655-d3a418542e85-kube-api-access-sj6p4\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-4f28g\" (UID: \"07197f67-1348-456f-a655-d3a418542e85\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.088484 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.093712 4884 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.097664 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.098002 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-zt5tx" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.104007 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-64fqx"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.137797 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.139622 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-64fqx" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.141194 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-lzvxl" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.163647 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sj6p4\" (UniqueName: \"kubernetes.io/projected/07197f67-1348-456f-a655-d3a418542e85-kube-api-access-sj6p4\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-4f28g\" (UID: \"07197f67-1348-456f-a655-d3a418542e85\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.172820 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgknk\" (UniqueName: \"kubernetes.io/projected/4309c558-54b8-4034-85db-c4ca159600ad-kube-api-access-fgknk\") pod \"manila-operator-controller-manager-5b5fd79c9c-j554b\" (UID: \"4309c558-54b8-4034-85db-c4ca159600ad\") " pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.181586 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-64fqx"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.194240 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5t4d\" (UniqueName: \"kubernetes.io/projected/9fd7b8b4-5bdd-4d3c-859e-78ba9cced0b6-kube-api-access-w5t4d\") pod \"mariadb-operator-controller-manager-79c8c4686c-gb69x\" (UID: \"9fd7b8b4-5bdd-4d3c-859e-78ba9cced0b6\") " pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.195254 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmcwv\" (UniqueName: \"kubernetes.io/projected/9295a029-0033-437d-a315-0549c7dc31aa-kube-api-access-lmcwv\") pod \"octavia-operator-controller-manager-998648c74-s5dkz\" (UID: \"9295a029-0033-437d-a315-0549c7dc31aa\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.195341 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spnc8\" (UniqueName: \"kubernetes.io/projected/69dc456a-37e7-45e5-8bc0-943cae050bd7-kube-api-access-spnc8\") pod \"ovn-operator-controller-manager-b6456fdb6-x65hg\" (UID: \"69dc456a-37e7-45e5-8bc0-943cae050bd7\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.195414 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hw9t2\" (UniqueName: \"kubernetes.io/projected/c818d687-5ceb-4124-9ca3-be82851aa092-kube-api-access-hw9t2\") pod \"nova-operator-controller-manager-697bc559fc-77grd\" (UID: \"c818d687-5ceb-4124-9ca3-be82851aa092\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-77grd" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.206122 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.227753 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.235465 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.236724 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.246788 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-n2rd5" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.250641 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hw9t2\" (UniqueName: \"kubernetes.io/projected/c818d687-5ceb-4124-9ca3-be82851aa092-kube-api-access-hw9t2\") pod \"nova-operator-controller-manager-697bc559fc-77grd\" (UID: \"c818d687-5ceb-4124-9ca3-be82851aa092\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-77grd" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.250708 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.258120 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.262657 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.263865 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.284887 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.285024 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.291504 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmcwv\" (UniqueName: \"kubernetes.io/projected/9295a029-0033-437d-a315-0549c7dc31aa-kube-api-access-lmcwv\") pod \"octavia-operator-controller-manager-998648c74-s5dkz\" (UID: \"9295a029-0033-437d-a315-0549c7dc31aa\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.292710 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-ckd5z" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.296371 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8frdj\" (UniqueName: \"kubernetes.io/projected/3607d45b-2333-4034-aa0b-830759f88204-kube-api-access-8frdj\") pod \"openstack-baremetal-operator-controller-manager-84b575879fxkb8j\" (UID: \"3607d45b-2333-4034-aa0b-830759f88204\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.296448 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fxkb8j\" (UID: \"3607d45b-2333-4034-aa0b-830759f88204\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.296479 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmsmh\" (UniqueName: \"kubernetes.io/projected/d5ce6700-aa2a-4a81-a4db-214dc5cd0305-kube-api-access-dmsmh\") pod \"placement-operator-controller-manager-78f8948974-64fqx\" (UID: \"d5ce6700-aa2a-4a81-a4db-214dc5cd0305\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-64fqx" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.296532 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spnc8\" (UniqueName: \"kubernetes.io/projected/69dc456a-37e7-45e5-8bc0-943cae050bd7-kube-api-access-spnc8\") pod \"ovn-operator-controller-manager-b6456fdb6-x65hg\" (UID: \"69dc456a-37e7-45e5-8bc0-943cae050bd7\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.324721 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spnc8\" (UniqueName: \"kubernetes.io/projected/69dc456a-37e7-45e5-8bc0-943cae050bd7-kube-api-access-spnc8\") pod \"ovn-operator-controller-manager-b6456fdb6-x65hg\" (UID: \"69dc456a-37e7-45e5-8bc0-943cae050bd7\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.345690 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.347134 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-77grd" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.388712 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-vrrck"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.395739 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-vrrck"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.395790 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.398618 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.400154 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8frdj\" (UniqueName: \"kubernetes.io/projected/3607d45b-2333-4034-aa0b-830759f88204-kube-api-access-8frdj\") pod \"openstack-baremetal-operator-controller-manager-84b575879fxkb8j\" (UID: \"3607d45b-2333-4034-aa0b-830759f88204\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.400215 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fxkb8j\" (UID: \"3607d45b-2333-4034-aa0b-830759f88204\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.400239 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slwnn\" (UniqueName: \"kubernetes.io/projected/04af4b93-00cd-4e1a-ad1e-84b438ff9b5f-kube-api-access-slwnn\") pod \"swift-operator-controller-manager-9d58d64bc-k7vwz\" (UID: \"04af4b93-00cd-4e1a-ad1e-84b438ff9b5f\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.400265 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmsmh\" (UniqueName: \"kubernetes.io/projected/d5ce6700-aa2a-4a81-a4db-214dc5cd0305-kube-api-access-dmsmh\") pod \"placement-operator-controller-manager-78f8948974-64fqx\" (UID: \"d5ce6700-aa2a-4a81-a4db-214dc5cd0305\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-64fqx" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.400319 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h66sv\" (UniqueName: \"kubernetes.io/projected/6302d211-8a02-4734-9206-7ff93939d971-kube-api-access-h66sv\") pod \"telemetry-operator-controller-manager-6fcddf47c-zdm75\" (UID: \"6302d211-8a02-4734-9206-7ff93939d971\") " pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" Dec 10 00:51:57 crc kubenswrapper[4884]: E1210 00:51:57.400689 4884 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 00:51:57 crc kubenswrapper[4884]: E1210 00:51:57.400742 
4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert podName:3607d45b-2333-4034-aa0b-830759f88204 nodeName:}" failed. No retries permitted until 2025-12-10 00:51:57.900726375 +0000 UTC m=+1290.978683492 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fxkb8j" (UID: "3607d45b-2333-4034-aa0b-830759f88204") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.400788 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.400695 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.406216 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-xzngl" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.406289 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-hz5lj" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.415501 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.416583 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.423050 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.423529 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.424085 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-5b2l6" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.427701 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8frdj\" (UniqueName: \"kubernetes.io/projected/3607d45b-2333-4034-aa0b-830759f88204-kube-api-access-8frdj\") pod \"openstack-baremetal-operator-controller-manager-84b575879fxkb8j\" (UID: \"3607d45b-2333-4034-aa0b-830759f88204\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.432805 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.435882 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmsmh\" (UniqueName: \"kubernetes.io/projected/d5ce6700-aa2a-4a81-a4db-214dc5cd0305-kube-api-access-dmsmh\") pod \"placement-operator-controller-manager-78f8948974-64fqx\" (UID: \"d5ce6700-aa2a-4a81-a4db-214dc5cd0305\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-64fqx" Dec 
10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.470420 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.471662 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-64fqx" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.473733 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.475818 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.487353 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-rwq66" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.507720 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert\") pod \"infra-operator-controller-manager-78d48bff9d-gxgfp\" (UID: \"4206f75a-b9be-4d84-806b-0dea3aab1823\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.507774 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjs6k\" (UniqueName: \"kubernetes.io/projected/25ce6a20-18da-43c1-b3e4-70b2ca9185e0-kube-api-access-sjs6k\") pod \"test-operator-controller-manager-5854674fcc-vrrck\" (UID: \"25ce6a20-18da-43c1-b3e4-70b2ca9185e0\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.507885 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slwnn\" (UniqueName: \"kubernetes.io/projected/04af4b93-00cd-4e1a-ad1e-84b438ff9b5f-kube-api-access-slwnn\") pod \"swift-operator-controller-manager-9d58d64bc-k7vwz\" (UID: \"04af4b93-00cd-4e1a-ad1e-84b438ff9b5f\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.507980 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cnwd\" (UniqueName: \"kubernetes.io/projected/99111a7e-4213-48ad-aa8e-71205314a433-kube-api-access-4cnwd\") pod \"watcher-operator-controller-manager-667bd8d554-cwql6\" (UID: \"99111a7e-4213-48ad-aa8e-71205314a433\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.508022 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h66sv\" (UniqueName: \"kubernetes.io/projected/6302d211-8a02-4734-9206-7ff93939d971-kube-api-access-h66sv\") pod \"telemetry-operator-controller-manager-6fcddf47c-zdm75\" (UID: \"6302d211-8a02-4734-9206-7ff93939d971\") " pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" Dec 10 00:51:57 crc kubenswrapper[4884]: E1210 00:51:57.508322 4884 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" 
not found Dec 10 00:51:57 crc kubenswrapper[4884]: E1210 00:51:57.508378 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert podName:4206f75a-b9be-4d84-806b-0dea3aab1823 nodeName:}" failed. No retries permitted until 2025-12-10 00:51:58.50836312 +0000 UTC m=+1291.586320237 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert") pod "infra-operator-controller-manager-78d48bff9d-gxgfp" (UID: "4206f75a-b9be-4d84-806b-0dea3aab1823") : secret "infra-operator-webhook-server-cert" not found Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.513544 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.555743 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h66sv\" (UniqueName: \"kubernetes.io/projected/6302d211-8a02-4734-9206-7ff93939d971-kube-api-access-h66sv\") pod \"telemetry-operator-controller-manager-6fcddf47c-zdm75\" (UID: \"6302d211-8a02-4734-9206-7ff93939d971\") " pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.556016 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slwnn\" (UniqueName: \"kubernetes.io/projected/04af4b93-00cd-4e1a-ad1e-84b438ff9b5f-kube-api-access-slwnn\") pod \"swift-operator-controller-manager-9d58d64bc-k7vwz\" (UID: \"04af4b93-00cd-4e1a-ad1e-84b438ff9b5f\") " pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.593805 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.609168 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwfgk\" (UniqueName: \"kubernetes.io/projected/effda10d-9f01-4d20-8f0b-23ff781864d2-kube-api-access-fwfgk\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.609234 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cnwd\" (UniqueName: \"kubernetes.io/projected/99111a7e-4213-48ad-aa8e-71205314a433-kube-api-access-4cnwd\") pod \"watcher-operator-controller-manager-667bd8d554-cwql6\" (UID: \"99111a7e-4213-48ad-aa8e-71205314a433\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.609262 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.609295 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26648\" (UniqueName: \"kubernetes.io/projected/0fdff275-feeb-4244-8032-75d06452776a-kube-api-access-26648\") pod \"rabbitmq-cluster-operator-manager-668c99d594-h2m6r\" (UID: \"0fdff275-feeb-4244-8032-75d06452776a\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.609315 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjs6k\" (UniqueName: \"kubernetes.io/projected/25ce6a20-18da-43c1-b3e4-70b2ca9185e0-kube-api-access-sjs6k\") pod \"test-operator-controller-manager-5854674fcc-vrrck\" (UID: \"25ce6a20-18da-43c1-b3e4-70b2ca9185e0\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.609398 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.635258 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cnwd\" (UniqueName: \"kubernetes.io/projected/99111a7e-4213-48ad-aa8e-71205314a433-kube-api-access-4cnwd\") pod \"watcher-operator-controller-manager-667bd8d554-cwql6\" (UID: \"99111a7e-4213-48ad-aa8e-71205314a433\") " pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.635911 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjs6k\" (UniqueName: 
\"kubernetes.io/projected/25ce6a20-18da-43c1-b3e4-70b2ca9185e0-kube-api-access-sjs6k\") pod \"test-operator-controller-manager-5854674fcc-vrrck\" (UID: \"25ce6a20-18da-43c1-b3e4-70b2ca9185e0\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.710910 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:51:57 crc kubenswrapper[4884]: E1210 00:51:57.711259 4884 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 00:51:57 crc kubenswrapper[4884]: E1210 00:51:57.711323 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs podName:effda10d-9f01-4d20-8f0b-23ff781864d2 nodeName:}" failed. No retries permitted until 2025-12-10 00:51:58.211301823 +0000 UTC m=+1291.289258940 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs") pod "openstack-operator-controller-manager-6d5bb94f9c-zvfmc" (UID: "effda10d-9f01-4d20-8f0b-23ff781864d2") : secret "metrics-server-cert" not found Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.712090 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwfgk\" (UniqueName: \"kubernetes.io/projected/effda10d-9f01-4d20-8f0b-23ff781864d2-kube-api-access-fwfgk\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.713418 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.713503 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26648\" (UniqueName: \"kubernetes.io/projected/0fdff275-feeb-4244-8032-75d06452776a-kube-api-access-26648\") pod \"rabbitmq-cluster-operator-manager-668c99d594-h2m6r\" (UID: \"0fdff275-feeb-4244-8032-75d06452776a\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r" Dec 10 00:51:57 crc kubenswrapper[4884]: E1210 00:51:57.725899 4884 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 00:51:57 crc kubenswrapper[4884]: E1210 00:51:57.725985 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs podName:effda10d-9f01-4d20-8f0b-23ff781864d2 nodeName:}" failed. No retries permitted until 2025-12-10 00:51:58.225965873 +0000 UTC m=+1291.303922990 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs") pod "openstack-operator-controller-manager-6d5bb94f9c-zvfmc" (UID: "effda10d-9f01-4d20-8f0b-23ff781864d2") : secret "webhook-server-cert" not found Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.792418 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26648\" (UniqueName: \"kubernetes.io/projected/0fdff275-feeb-4244-8032-75d06452776a-kube-api-access-26648\") pod \"rabbitmq-cluster-operator-manager-668c99d594-h2m6r\" (UID: \"0fdff275-feeb-4244-8032-75d06452776a\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.793160 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwfgk\" (UniqueName: \"kubernetes.io/projected/effda10d-9f01-4d20-8f0b-23ff781864d2-kube-api-access-fwfgk\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.807329 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.832623 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8"] Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.918087 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.923041 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fxkb8j\" (UID: \"3607d45b-2333-4034-aa0b-830759f88204\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:51:57 crc kubenswrapper[4884]: E1210 00:51:57.923231 4884 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 00:51:57 crc kubenswrapper[4884]: E1210 00:51:57.923280 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert podName:3607d45b-2333-4034-aa0b-830759f88204 nodeName:}" failed. No retries permitted until 2025-12-10 00:51:58.923265156 +0000 UTC m=+1292.001222273 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fxkb8j" (UID: "3607d45b-2333-4034-aa0b-830759f88204") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.932107 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" Dec 10 00:51:57 crc kubenswrapper[4884]: I1210 00:51:57.977701 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r" Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.087317 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8" event={"ID":"c355d3e8-66ee-46b5-8979-d94efb631d6a","Type":"ContainerStarted","Data":"012ea3bc8a2eb9156efc022aa5bd53929ade0b3062172c1eb6565e8ae59f40a9"} Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.227863 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.227947 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:51:58 crc kubenswrapper[4884]: E1210 00:51:58.228133 4884 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 00:51:58 crc kubenswrapper[4884]: E1210 00:51:58.228155 4884 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 00:51:58 crc kubenswrapper[4884]: E1210 00:51:58.228202 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs podName:effda10d-9f01-4d20-8f0b-23ff781864d2 nodeName:}" failed. No retries permitted until 2025-12-10 00:51:59.228184104 +0000 UTC m=+1292.306141221 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs") pod "openstack-operator-controller-manager-6d5bb94f9c-zvfmc" (UID: "effda10d-9f01-4d20-8f0b-23ff781864d2") : secret "metrics-server-cert" not found Dec 10 00:51:58 crc kubenswrapper[4884]: E1210 00:51:58.228219 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs podName:effda10d-9f01-4d20-8f0b-23ff781864d2 nodeName:}" failed. No retries permitted until 2025-12-10 00:51:59.228212604 +0000 UTC m=+1292.306169711 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs") pod "openstack-operator-controller-manager-6d5bb94f9c-zvfmc" (UID: "effda10d-9f01-4d20-8f0b-23ff781864d2") : secret "webhook-server-cert" not found Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.516972 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx"] Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.528295 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz"] Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.537928 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9"] Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.541751 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert\") pod \"infra-operator-controller-manager-78d48bff9d-gxgfp\" (UID: \"4206f75a-b9be-4d84-806b-0dea3aab1823\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:51:58 crc kubenswrapper[4884]: E1210 00:51:58.541961 4884 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 00:51:58 crc kubenswrapper[4884]: E1210 00:51:58.542044 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert podName:4206f75a-b9be-4d84-806b-0dea3aab1823 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:00.542019769 +0000 UTC m=+1293.619976886 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert") pod "infra-operator-controller-manager-78d48bff9d-gxgfp" (UID: "4206f75a-b9be-4d84-806b-0dea3aab1823") : secret "infra-operator-webhook-server-cert" not found Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.546743 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6"] Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.747149 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr"] Dec 10 00:51:58 crc kubenswrapper[4884]: W1210 00:51:58.750678 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69dc456a_37e7_45e5_8bc0_943cae050bd7.slice/crio-d639202f3f60b2c07f2b0962ad51b62ca529241e8fbb0d3b1b86fef65ed5ed44 WatchSource:0}: Error finding container d639202f3f60b2c07f2b0962ad51b62ca529241e8fbb0d3b1b86fef65ed5ed44: Status 404 returned error can't find the container with id d639202f3f60b2c07f2b0962ad51b62ca529241e8fbb0d3b1b86fef65ed5ed44 Dec 10 00:51:58 crc kubenswrapper[4884]: W1210 00:51:58.757411 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9fd7b8b4_5bdd_4d3c_859e_78ba9cced0b6.slice/crio-31b2a537cc14a9c3726b56737af1cf079838d6fe83e3dc5926d1a88e12c0c21f WatchSource:0}: Error finding container 31b2a537cc14a9c3726b56737af1cf079838d6fe83e3dc5926d1a88e12c0c21f: Status 404 returned error can't find the container with id 31b2a537cc14a9c3726b56737af1cf079838d6fe83e3dc5926d1a88e12c0c21f Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.760600 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-77grd"] Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.767915 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg"] Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.774547 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x"] Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.778624 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-64fqx"] Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.785972 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb"] Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.792120 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k"] Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.811771 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g"] Dec 10 00:51:58 crc kubenswrapper[4884]: E1210 00:51:58.829687 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sj6p4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5fdfd5b6b5-4f28g_openstack-operators(07197f67-1348-456f-a655-d3a418542e85): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:58 crc kubenswrapper[4884]: E1210 00:51:58.832859 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sj6p4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5fdfd5b6b5-4f28g_openstack-operators(07197f67-1348-456f-a655-d3a418542e85): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:58 crc kubenswrapper[4884]: E1210 00:51:58.834121 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" podUID="07197f67-1348-456f-a655-d3a418542e85" Dec 10 00:51:58 crc kubenswrapper[4884]: I1210 00:51:58.947881 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fxkb8j\" (UID: \"3607d45b-2333-4034-aa0b-830759f88204\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:51:58 crc kubenswrapper[4884]: E1210 00:51:58.948117 4884 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 00:51:58 crc kubenswrapper[4884]: E1210 00:51:58.948165 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert podName:3607d45b-2333-4034-aa0b-830759f88204 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:00.948150391 +0000 UTC m=+1294.026107498 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fxkb8j" (UID: "3607d45b-2333-4034-aa0b-830759f88204") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.064705 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz"] Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.076694 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz"] Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.090844 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.129.56.41:5001/openstack-k8s-operators/telemetry-operator:c4794e7165126ca78a1af546bb4ba50c90b5c4e1,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h66sv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-6fcddf47c-zdm75_openstack-operators(6302d211-8a02-4734-9206-7ff93939d971): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.093384 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ 
--logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h66sv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-6fcddf47c-zdm75_openstack-operators(6302d211-8a02-4734-9206-7ff93939d971): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.094710 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" podUID="6302d211-8a02-4734-9206-7ff93939d971" Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.094932 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lmcwv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-s5dkz_openstack-operators(9295a029-0033-437d-a315-0549c7dc31aa): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.098417 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lmcwv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-s5dkz_openstack-operators(9295a029-0033-437d-a315-0549c7dc31aa): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:59 crc kubenswrapper[4884]: W1210 00:51:59.099248 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4309c558_54b8_4034_85db_c4ca159600ad.slice/crio-f3d4b839bfb00f95e31f37ff6aabda3d885ced91dbb29d4904c78320f98f79c9 WatchSource:0}: Error finding container f3d4b839bfb00f95e31f37ff6aabda3d885ced91dbb29d4904c78320f98f79c9: Status 404 returned error can't find the container with id f3d4b839bfb00f95e31f37ff6aabda3d885ced91dbb29d4904c78320f98f79c9 Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.099514 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" 
pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" podUID="9295a029-0033-437d-a315-0549c7dc31aa" Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.100034 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6b3e0302608a2e70f9b5ae9167f6fbf59264f226d9db99d48f70466ab2f216b8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4cnwd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-667bd8d554-cwql6_openstack-operators(99111a7e-4213-48ad-aa8e-71205314a433): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.103280 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75"] Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.103799 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4cnwd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-667bd8d554-cwql6_openstack-operators(99111a7e-4213-48ad-aa8e-71205314a433): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.104522 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fgknk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5b5fd79c9c-j554b_openstack-operators(4309c558-54b8-4034-85db-c4ca159600ad): 
ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.104941 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" podUID="99111a7e-4213-48ad-aa8e-71205314a433" Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.105799 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" event={"ID":"9295a029-0033-437d-a315-0549c7dc31aa","Type":"ContainerStarted","Data":"48de81e85a497e73a6916479ece43d3c48805afda0806a6291d7f4007138b31d"} Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.106192 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sjs6k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-vrrck_openstack-operators(25ce6a20-18da-43c1-b3e4-70b2ca9185e0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:59 crc kubenswrapper[4884]: W1210 00:51:59.110547 4884 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0fdff275_feeb_4244_8032_75d06452776a.slice/crio-6515fe9b9896881c5143ca5446be42c839d59c1e2fbd2c7e3d22b5782546db5e WatchSource:0}: Error finding container 6515fe9b9896881c5143ca5446be42c839d59c1e2fbd2c7e3d22b5782546db5e: Status 404 returned error can't find the container with id 6515fe9b9896881c5143ca5446be42c839d59c1e2fbd2c7e3d22b5782546db5e Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.110685 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sjs6k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-vrrck_openstack-operators(25ce6a20-18da-43c1-b3e4-70b2ca9185e0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.110788 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fgknk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5b5fd79c9c-j554b_openstack-operators(4309c558-54b8-4034-85db-c4ca159600ad): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.111837 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" podUID="25ce6a20-18da-43c1-b3e4-70b2ca9185e0" Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.111881 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" podUID="9295a029-0033-437d-a315-0549c7dc31aa" Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.111900 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" podUID="4309c558-54b8-4034-85db-c4ca159600ad" Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.112851 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr" event={"ID":"203fedfb-ba93-4cb0-afd5-a01607b4f40d","Type":"ContainerStarted","Data":"01f74f5dc69ab50becabd456cad566f2601308e1de99c44621aa28257af5f5a7"} Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.113045 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-26648,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-h2m6r_openstack-operators(0fdff275-feeb-4244-8032-75d06452776a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.115212 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r" podUID="0fdff275-feeb-4244-8032-75d06452776a" Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.121125 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" event={"ID":"6302d211-8a02-4734-9206-7ff93939d971","Type":"ContainerStarted","Data":"f130640c819d139ffdbe7b602256ea78225fb55c0477968ce17316c7b63cce0a"} Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.128565 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6" event={"ID":"6953ae1f-46db-410e-b79a-7eff9b687850","Type":"ContainerStarted","Data":"95779e4726e61f7df2ee95128eb793067a2e4d46ff35da316aa823455816aa7d"} Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.134506 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6"] Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.136756 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x" 
event={"ID":"9fd7b8b4-5bdd-4d3c-859e-78ba9cced0b6","Type":"ContainerStarted","Data":"31b2a537cc14a9c3726b56737af1cf079838d6fe83e3dc5926d1a88e12c0c21f"} Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.136847 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.129.56.41:5001/openstack-k8s-operators/telemetry-operator:c4794e7165126ca78a1af546bb4ba50c90b5c4e1\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" podUID="6302d211-8a02-4734-9206-7ff93939d971" Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.146697 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz" event={"ID":"04af4b93-00cd-4e1a-ad1e-84b438ff9b5f","Type":"ContainerStarted","Data":"706494b2744ff066b1e56fe1970c7991a52c4ee4a2e637d78961b39c86a6c665"} Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.151295 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r"] Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.151322 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz" event={"ID":"c88f7e0d-d880-42ce-96fd-a1d1ec7be33f","Type":"ContainerStarted","Data":"637735fe7905a99656ef890e325c99292c6a4af2a786302e52946e999142a53e"} Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.157526 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b"] Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.158960 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" event={"ID":"07197f67-1348-456f-a655-d3a418542e85","Type":"ContainerStarted","Data":"642865aa95b1d5486143c3cc198eae9766a3d357db6b2396e32e433015fb24cc"} Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.160171 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-77grd" event={"ID":"c818d687-5ceb-4124-9ca3-be82851aa092","Type":"ContainerStarted","Data":"774f9d52403622f975f57cf011c5472a79d2e07f7748a07f962027d1d708faf6"} Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.160749 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" podUID="07197f67-1348-456f-a655-d3a418542e85" Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.161342 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx" event={"ID":"c74b9b64-5f7c-462a-85e9-a7eacaf2824e","Type":"ContainerStarted","Data":"7d92a91e6f62ca1f6cc9806acd265c8df0f2ef7282e2ec6b76558b33656cffc1"} 
Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.162663 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg" event={"ID":"69dc456a-37e7-45e5-8bc0-943cae050bd7","Type":"ContainerStarted","Data":"d639202f3f60b2c07f2b0962ad51b62ca529241e8fbb0d3b1b86fef65ed5ed44"}
Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.164467 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-vrrck"]
Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.166609 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k" event={"ID":"53fab550-e4ee-4601-bc21-e93b60ee3788","Type":"ContainerStarted","Data":"6a282e229e9ccb63444270c4fa6618bb77a42286b552c0ef865ce20cb769fbce"}
Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.168056 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb" event={"ID":"e9ad1d55-9cdf-42bd-89c4-47a4ad0150db","Type":"ContainerStarted","Data":"ed07b54c85a1f75117716084d8906af515697d144cf2b882aae2fcadc6ceddaf"}
Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.169306 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9" event={"ID":"4e506cfc-ad57-47e4-91ff-c4779cec4258","Type":"ContainerStarted","Data":"83ab7ae50245357d5557beafa687b0c29ddc8949e119de3c008c0d1ccaac94b0"}
Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.170337 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-64fqx" event={"ID":"d5ce6700-aa2a-4a81-a4db-214dc5cd0305","Type":"ContainerStarted","Data":"af349b41edb8ed8b8b3c5f18b367436f1f3ea5a49a3eb4b08967d668b0525419"}
Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.254502 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc"
Dec 10 00:51:59 crc kubenswrapper[4884]: I1210 00:51:59.254585 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc"
Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.254743 4884 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.254792 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs podName:effda10d-9f01-4d20-8f0b-23ff781864d2 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:01.254775684 +0000 UTC m=+1294.332732791 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs") pod "openstack-operator-controller-manager-6d5bb94f9c-zvfmc" (UID: "effda10d-9f01-4d20-8f0b-23ff781864d2") : secret "webhook-server-cert" not found
Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.255419 4884 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Dec 10 00:51:59 crc kubenswrapper[4884]: E1210 00:51:59.257192 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs podName:effda10d-9f01-4d20-8f0b-23ff781864d2 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:01.257176618 +0000 UTC m=+1294.335133725 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs") pod "openstack-operator-controller-manager-6d5bb94f9c-zvfmc" (UID: "effda10d-9f01-4d20-8f0b-23ff781864d2") : secret "metrics-server-cert" not found
Dec 10 00:52:00 crc kubenswrapper[4884]: I1210 00:52:00.183303 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r" event={"ID":"0fdff275-feeb-4244-8032-75d06452776a","Type":"ContainerStarted","Data":"6515fe9b9896881c5143ca5446be42c839d59c1e2fbd2c7e3d22b5782546db5e"}
Dec 10 00:52:00 crc kubenswrapper[4884]: E1210 00:52:00.187027 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r" podUID="0fdff275-feeb-4244-8032-75d06452776a"
Dec 10 00:52:00 crc kubenswrapper[4884]: I1210 00:52:00.187703 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" event={"ID":"4309c558-54b8-4034-85db-c4ca159600ad","Type":"ContainerStarted","Data":"f3d4b839bfb00f95e31f37ff6aabda3d885ced91dbb29d4904c78320f98f79c9"}
Dec 10 00:52:00 crc kubenswrapper[4884]: I1210 00:52:00.189592 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" event={"ID":"99111a7e-4213-48ad-aa8e-71205314a433","Type":"ContainerStarted","Data":"a1c51ded22a71198a724a0ee90e52210e99a1e204c2bc94fa7a70a7fb1e0c6ff"}
Dec 10 00:52:00 crc kubenswrapper[4884]: E1210 00:52:00.190218 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" podUID="4309c558-54b8-4034-85db-c4ca159600ad"
Dec 10 00:52:00 crc kubenswrapper[4884]: E1210 00:52:00.191092 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image
\\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6b3e0302608a2e70f9b5ae9167f6fbf59264f226d9db99d48f70466ab2f216b8\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" podUID="99111a7e-4213-48ad-aa8e-71205314a433" Dec 10 00:52:00 crc kubenswrapper[4884]: I1210 00:52:00.191105 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" event={"ID":"25ce6a20-18da-43c1-b3e4-70b2ca9185e0","Type":"ContainerStarted","Data":"180bb054579db801cd75640abcb69a55ab6ddf99a7eaa225e0f2e14bb35f8d1e"} Dec 10 00:52:00 crc kubenswrapper[4884]: E1210 00:52:00.198102 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.129.56.41:5001/openstack-k8s-operators/telemetry-operator:c4794e7165126ca78a1af546bb4ba50c90b5c4e1\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" podUID="6302d211-8a02-4734-9206-7ff93939d971" Dec 10 00:52:00 crc kubenswrapper[4884]: E1210 00:52:00.198124 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" podUID="25ce6a20-18da-43c1-b3e4-70b2ca9185e0" Dec 10 00:52:00 crc kubenswrapper[4884]: E1210 00:52:00.215564 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" podUID="07197f67-1348-456f-a655-d3a418542e85" Dec 10 00:52:00 crc kubenswrapper[4884]: E1210 00:52:00.215718 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" podUID="9295a029-0033-437d-a315-0549c7dc31aa" Dec 10 00:52:00 crc kubenswrapper[4884]: I1210 00:52:00.593624 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert\") pod \"infra-operator-controller-manager-78d48bff9d-gxgfp\" (UID: \"4206f75a-b9be-4d84-806b-0dea3aab1823\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:52:00 crc kubenswrapper[4884]: E1210 00:52:00.593923 4884 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 00:52:00 crc kubenswrapper[4884]: E1210 00:52:00.594035 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert podName:4206f75a-b9be-4d84-806b-0dea3aab1823 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:04.594015799 +0000 UTC m=+1297.671972916 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert") pod "infra-operator-controller-manager-78d48bff9d-gxgfp" (UID: "4206f75a-b9be-4d84-806b-0dea3aab1823") : secret "infra-operator-webhook-server-cert" not found Dec 10 00:52:00 crc kubenswrapper[4884]: I1210 00:52:00.999867 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fxkb8j\" (UID: \"3607d45b-2333-4034-aa0b-830759f88204\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:52:01 crc kubenswrapper[4884]: E1210 00:52:01.000038 4884 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 00:52:01 crc kubenswrapper[4884]: E1210 00:52:01.000122 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert podName:3607d45b-2333-4034-aa0b-830759f88204 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:05.00009926 +0000 UTC m=+1298.078056437 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fxkb8j" (UID: "3607d45b-2333-4034-aa0b-830759f88204") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 00:52:01 crc kubenswrapper[4884]: E1210 00:52:01.201601 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r" podUID="0fdff275-feeb-4244-8032-75d06452776a" Dec 10 00:52:01 crc kubenswrapper[4884]: E1210 00:52:01.202418 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" podUID="25ce6a20-18da-43c1-b3e4-70b2ca9185e0" Dec 10 00:52:01 crc kubenswrapper[4884]: E1210 00:52:01.203308 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6b3e0302608a2e70f9b5ae9167f6fbf59264f226d9db99d48f70466ab2f216b8\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" podUID="99111a7e-4213-48ad-aa8e-71205314a433" Dec 10 00:52:01 crc kubenswrapper[4884]: E1210 00:52:01.207718 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:44126f9c6b1d2bf752ddf989e20a4fc4cc1c07723d4fcb78465ccb2f55da6b3a\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" podUID="4309c558-54b8-4034-85db-c4ca159600ad" Dec 10 00:52:01 crc kubenswrapper[4884]: I1210 00:52:01.304925 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:52:01 crc kubenswrapper[4884]: I1210 00:52:01.305005 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " 
pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:52:01 crc kubenswrapper[4884]: E1210 00:52:01.305158 4884 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 00:52:01 crc kubenswrapper[4884]: E1210 00:52:01.305200 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs podName:effda10d-9f01-4d20-8f0b-23ff781864d2 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:05.305187031 +0000 UTC m=+1298.383144138 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs") pod "openstack-operator-controller-manager-6d5bb94f9c-zvfmc" (UID: "effda10d-9f01-4d20-8f0b-23ff781864d2") : secret "webhook-server-cert" not found Dec 10 00:52:01 crc kubenswrapper[4884]: E1210 00:52:01.305977 4884 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 00:52:01 crc kubenswrapper[4884]: E1210 00:52:01.306019 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs podName:effda10d-9f01-4d20-8f0b-23ff781864d2 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:05.306007434 +0000 UTC m=+1298.383964551 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs") pod "openstack-operator-controller-manager-6d5bb94f9c-zvfmc" (UID: "effda10d-9f01-4d20-8f0b-23ff781864d2") : secret "metrics-server-cert" not found Dec 10 00:52:04 crc kubenswrapper[4884]: I1210 00:52:04.682839 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert\") pod \"infra-operator-controller-manager-78d48bff9d-gxgfp\" (UID: \"4206f75a-b9be-4d84-806b-0dea3aab1823\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:52:04 crc kubenswrapper[4884]: E1210 00:52:04.683020 4884 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 10 00:52:04 crc kubenswrapper[4884]: E1210 00:52:04.683547 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert podName:4206f75a-b9be-4d84-806b-0dea3aab1823 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:12.683524131 +0000 UTC m=+1305.761481268 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert") pod "infra-operator-controller-manager-78d48bff9d-gxgfp" (UID: "4206f75a-b9be-4d84-806b-0dea3aab1823") : secret "infra-operator-webhook-server-cert" not found Dec 10 00:52:05 crc kubenswrapper[4884]: I1210 00:52:05.089681 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fxkb8j\" (UID: \"3607d45b-2333-4034-aa0b-830759f88204\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:52:05 crc kubenswrapper[4884]: E1210 00:52:05.089913 4884 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 00:52:05 crc kubenswrapper[4884]: E1210 00:52:05.089957 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert podName:3607d45b-2333-4034-aa0b-830759f88204 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:13.089943421 +0000 UTC m=+1306.167900528 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert") pod "openstack-baremetal-operator-controller-manager-84b575879fxkb8j" (UID: "3607d45b-2333-4034-aa0b-830759f88204") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 10 00:52:05 crc kubenswrapper[4884]: I1210 00:52:05.394078 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:52:05 crc kubenswrapper[4884]: I1210 00:52:05.394388 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:52:05 crc kubenswrapper[4884]: E1210 00:52:05.394942 4884 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 00:52:05 crc kubenswrapper[4884]: E1210 00:52:05.395002 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs podName:effda10d-9f01-4d20-8f0b-23ff781864d2 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:13.394983992 +0000 UTC m=+1306.472941109 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs") pod "openstack-operator-controller-manager-6d5bb94f9c-zvfmc" (UID: "effda10d-9f01-4d20-8f0b-23ff781864d2") : secret "webhook-server-cert" not found Dec 10 00:52:05 crc kubenswrapper[4884]: E1210 00:52:05.396367 4884 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 10 00:52:05 crc kubenswrapper[4884]: E1210 00:52:05.396485 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs podName:effda10d-9f01-4d20-8f0b-23ff781864d2 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:13.396465082 +0000 UTC m=+1306.474422199 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs") pod "openstack-operator-controller-manager-6d5bb94f9c-zvfmc" (UID: "effda10d-9f01-4d20-8f0b-23ff781864d2") : secret "metrics-server-cert" not found Dec 10 00:52:11 crc kubenswrapper[4884]: E1210 00:52:11.556010 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:5bdb3685be3ddc1efd62e16aaf2fa96ead64315e26d52b1b2a7d8ac01baa1e87" Dec 10 00:52:11 crc kubenswrapper[4884]: E1210 00:52:11.556468 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:5bdb3685be3ddc1efd62e16aaf2fa96ead64315e26d52b1b2a7d8ac01baa1e87,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kvt2r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-967d97867-5zl6k_openstack-operators(53fab550-e4ee-4601-bc21-e93b60ee3788): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 00:52:12 crc kubenswrapper[4884]: E1210 00:52:12.294371 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:c4abfc148600dfa85915f3dc911d988ea2335f26cb6b8d749fe79bfe53e5e429" Dec 10 00:52:12 crc kubenswrapper[4884]: E1210 00:52:12.294757 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:c4abfc148600dfa85915f3dc911d988ea2335f26cb6b8d749fe79bfe53e5e429,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7txc9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5f64f6f8bb-w9xfr_openstack-operators(203fedfb-ba93-4cb0-afd5-a01607b4f40d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 00:52:12 crc kubenswrapper[4884]: I1210 00:52:12.757345 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert\") pod \"infra-operator-controller-manager-78d48bff9d-gxgfp\" (UID: \"4206f75a-b9be-4d84-806b-0dea3aab1823\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:52:12 crc kubenswrapper[4884]: I1210 00:52:12.764357 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4206f75a-b9be-4d84-806b-0dea3aab1823-cert\") pod \"infra-operator-controller-manager-78d48bff9d-gxgfp\" (UID: \"4206f75a-b9be-4d84-806b-0dea3aab1823\") " pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:52:12 crc kubenswrapper[4884]: I1210 00:52:12.919550 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:52:13 crc kubenswrapper[4884]: I1210 00:52:13.164386 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fxkb8j\" (UID: \"3607d45b-2333-4034-aa0b-830759f88204\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:52:13 crc kubenswrapper[4884]: I1210 00:52:13.169986 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3607d45b-2333-4034-aa0b-830759f88204-cert\") pod \"openstack-baremetal-operator-controller-manager-84b575879fxkb8j\" (UID: \"3607d45b-2333-4034-aa0b-830759f88204\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:52:13 crc kubenswrapper[4884]: I1210 00:52:13.352655 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:52:13 crc kubenswrapper[4884]: I1210 00:52:13.469031 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:52:13 crc kubenswrapper[4884]: E1210 00:52:13.469146 4884 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 10 00:52:13 crc kubenswrapper[4884]: E1210 00:52:13.469248 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs podName:effda10d-9f01-4d20-8f0b-23ff781864d2 nodeName:}" failed. No retries permitted until 2025-12-10 00:52:29.469199743 +0000 UTC m=+1322.547156860 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs") pod "openstack-operator-controller-manager-6d5bb94f9c-zvfmc" (UID: "effda10d-9f01-4d20-8f0b-23ff781864d2") : secret "webhook-server-cert" not found Dec 10 00:52:13 crc kubenswrapper[4884]: I1210 00:52:13.469283 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:52:13 crc kubenswrapper[4884]: I1210 00:52:13.474024 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-metrics-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:52:18 crc kubenswrapper[4884]: I1210 00:52:18.098518 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:52:18 crc kubenswrapper[4884]: I1210 00:52:18.099037 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:52:18 crc kubenswrapper[4884]: E1210 00:52:18.846474 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670" Dec 10 00:52:18 crc kubenswrapper[4884]: E1210 00:52:18.847080 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hw9t2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-77grd_openstack-operators(c818d687-5ceb-4124-9ca3-be82851aa092): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 00:52:19 crc kubenswrapper[4884]: I1210 00:52:19.369507 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz" event={"ID":"c88f7e0d-d880-42ce-96fd-a1d1ec7be33f","Type":"ContainerStarted","Data":"60ffb0c1f070271066eb02117bd2aaf76bf4a0d030fcd779db96d2afab8e1eb2"} Dec 10 00:52:19 crc kubenswrapper[4884]: I1210 00:52:19.394404 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6" event={"ID":"6953ae1f-46db-410e-b79a-7eff9b687850","Type":"ContainerStarted","Data":"15717221477f616ae373f5a733cba0906663196dbd86c3f70c3041de18cccd13"} Dec 10 00:52:19 crc kubenswrapper[4884]: I1210 00:52:19.396026 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-64fqx" event={"ID":"d5ce6700-aa2a-4a81-a4db-214dc5cd0305","Type":"ContainerStarted","Data":"847b286bba07ae95db23cdd40fde3f9b0f37604009c0baf0031ded11327fad16"} Dec 10 00:52:19 crc kubenswrapper[4884]: 
I1210 00:52:19.410055 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp"] Dec 10 00:52:19 crc kubenswrapper[4884]: I1210 00:52:19.478611 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j"] Dec 10 00:52:19 crc kubenswrapper[4884]: W1210 00:52:19.630585 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4206f75a_b9be_4d84_806b_0dea3aab1823.slice/crio-3d9b080e4a8cb1194e397c77d7e281deab929ab29069734369bc4e1b2eedd002 WatchSource:0}: Error finding container 3d9b080e4a8cb1194e397c77d7e281deab929ab29069734369bc4e1b2eedd002: Status 404 returned error can't find the container with id 3d9b080e4a8cb1194e397c77d7e281deab929ab29069734369bc4e1b2eedd002 Dec 10 00:52:19 crc kubenswrapper[4884]: W1210 00:52:19.668639 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3607d45b_2333_4034_aa0b_830759f88204.slice/crio-fdaa2b8d1924c2139f271204bee4828950eb8dc962994387738b8abecb3291af WatchSource:0}: Error finding container fdaa2b8d1924c2139f271204bee4828950eb8dc962994387738b8abecb3291af: Status 404 returned error can't find the container with id fdaa2b8d1924c2139f271204bee4828950eb8dc962994387738b8abecb3291af Dec 10 00:52:20 crc kubenswrapper[4884]: I1210 00:52:20.433081 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x" event={"ID":"9fd7b8b4-5bdd-4d3c-859e-78ba9cced0b6","Type":"ContainerStarted","Data":"b38b42a6c410b6fbf2c000c3b90f8ea41bacb67eee5e8691452526b68fcc2367"} Dec 10 00:52:20 crc kubenswrapper[4884]: I1210 00:52:20.435171 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg" event={"ID":"69dc456a-37e7-45e5-8bc0-943cae050bd7","Type":"ContainerStarted","Data":"e5320e1c7ebe96a39c9139d4230f05e71285a2ac0c5ae19557981a708da53155"} Dec 10 00:52:20 crc kubenswrapper[4884]: I1210 00:52:20.436795 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz" event={"ID":"04af4b93-00cd-4e1a-ad1e-84b438ff9b5f","Type":"ContainerStarted","Data":"c62b3e6583fecfaeb94a85b5c4006e7d54441a17c62a9ee13abd682b0545fa53"} Dec 10 00:52:20 crc kubenswrapper[4884]: I1210 00:52:20.438347 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx" event={"ID":"c74b9b64-5f7c-462a-85e9-a7eacaf2824e","Type":"ContainerStarted","Data":"9c3a61e4fc55e35c41b449b01847b227285a0d8d84cdaf0ec36e81ad1da1d878"} Dec 10 00:52:20 crc kubenswrapper[4884]: I1210 00:52:20.440653 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb" event={"ID":"e9ad1d55-9cdf-42bd-89c4-47a4ad0150db","Type":"ContainerStarted","Data":"0fd19d4baa2bc4dc1ccab9aa299fe962dbe6a2ea76bb1387b7707d9427457d24"} Dec 10 00:52:20 crc kubenswrapper[4884]: I1210 00:52:20.443658 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9" event={"ID":"4e506cfc-ad57-47e4-91ff-c4779cec4258","Type":"ContainerStarted","Data":"918c3117f4b99d2ccbdd892377b2877ebcec14adf662d34fd442e6f2e353fa5f"} Dec 10 00:52:20 crc 
kubenswrapper[4884]: I1210 00:52:20.444654 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" event={"ID":"3607d45b-2333-4034-aa0b-830759f88204","Type":"ContainerStarted","Data":"fdaa2b8d1924c2139f271204bee4828950eb8dc962994387738b8abecb3291af"} Dec 10 00:52:20 crc kubenswrapper[4884]: I1210 00:52:20.445657 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" event={"ID":"4206f75a-b9be-4d84-806b-0dea3aab1823","Type":"ContainerStarted","Data":"3d9b080e4a8cb1194e397c77d7e281deab929ab29069734369bc4e1b2eedd002"} Dec 10 00:52:20 crc kubenswrapper[4884]: I1210 00:52:20.447163 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8" event={"ID":"c355d3e8-66ee-46b5-8979-d94efb631d6a","Type":"ContainerStarted","Data":"c0003b2230d0eecae371faf8e821d23b377407189aa137c83d2523c22ad6edbe"} Dec 10 00:52:29 crc kubenswrapper[4884]: E1210 00:52:29.553830 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr" podUID="203fedfb-ba93-4cb0-afd5-a01607b4f40d" Dec 10 00:52:29 crc kubenswrapper[4884]: I1210 00:52:29.554235 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:52:29 crc kubenswrapper[4884]: E1210 00:52:29.563886 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-77grd" podUID="c818d687-5ceb-4124-9ca3-be82851aa092" Dec 10 00:52:29 crc kubenswrapper[4884]: I1210 00:52:29.570554 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" event={"ID":"25ce6a20-18da-43c1-b3e4-70b2ca9185e0","Type":"ContainerStarted","Data":"ecf3859748ccc36c9fdfbe1834bc385441e474ff498731bccd40b8d7ea7772c3"} Dec 10 00:52:29 crc kubenswrapper[4884]: I1210 00:52:29.577379 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/effda10d-9f01-4d20-8f0b-23ff781864d2-webhook-certs\") pod \"openstack-operator-controller-manager-6d5bb94f9c-zvfmc\" (UID: \"effda10d-9f01-4d20-8f0b-23ff781864d2\") " pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:52:29 crc kubenswrapper[4884]: I1210 00:52:29.664263 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" event={"ID":"9295a029-0033-437d-a315-0549c7dc31aa","Type":"ContainerStarted","Data":"c75bc033ca9dc1c51b2553ac6f5d6bd8a7833f2fad9f9f611e62b47c30dc7f87"} Dec 10 00:52:29 crc kubenswrapper[4884]: I1210 00:52:29.732917 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" event={"ID":"4206f75a-b9be-4d84-806b-0dea3aab1823","Type":"ContainerStarted","Data":"be56301bd9731ada10bedf127ea73bfe1ee174030953c6f78f0ec690a7ee1123"} Dec 10 00:52:29 crc kubenswrapper[4884]: I1210 00:52:29.751275 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" event={"ID":"99111a7e-4213-48ad-aa8e-71205314a433","Type":"ContainerStarted","Data":"d834571b67cc2f1204cff8fd16b0ac24191f28252b70a8dc0fdb869558231732"} Dec 10 00:52:29 crc kubenswrapper[4884]: I1210 00:52:29.757812 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-5b2l6" Dec 10 00:52:29 crc kubenswrapper[4884]: I1210 00:52:29.768525 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:52:29 crc kubenswrapper[4884]: I1210 00:52:29.780576 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr" event={"ID":"203fedfb-ba93-4cb0-afd5-a01607b4f40d","Type":"ContainerStarted","Data":"fce223474616cd421388961a80cde2f73e92c45fe0512cc5fd805be69738dcd2"} Dec 10 00:52:29 crc kubenswrapper[4884]: I1210 00:52:29.810029 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" event={"ID":"6302d211-8a02-4734-9206-7ff93939d971","Type":"ContainerStarted","Data":"00fc80d5dc9f09eb7b897375766e8fa7b9ec75de3c8acce26d6141afb9e114ad"} Dec 10 00:52:29 crc kubenswrapper[4884]: I1210 00:52:29.836289 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" event={"ID":"4309c558-54b8-4034-85db-c4ca159600ad","Type":"ContainerStarted","Data":"ccdc0571a0502225a47ad60056fe378cd20c50751bed0276a8581e634907e908"} Dec 10 00:52:30 crc kubenswrapper[4884]: E1210 00:52:30.058852 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k" podUID="53fab550-e4ee-4601-bc21-e93b60ee3788" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.376815 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc"] Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.851308 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg" event={"ID":"69dc456a-37e7-45e5-8bc0-943cae050bd7","Type":"ContainerStarted","Data":"bd00f5c64f98d1ee1ac02886c6df6dbbede6d6118fa66e0e00834a6b5ddfdf62"} Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.851708 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.853756 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.859454 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k" event={"ID":"53fab550-e4ee-4601-bc21-e93b60ee3788","Type":"ContainerStarted","Data":"c6110d86b6a214e18e14cd2d5ca9264115875b9380dafc6f50fb7d677e9feae0"} Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.862521 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" event={"ID":"6302d211-8a02-4734-9206-7ff93939d971","Type":"ContainerStarted","Data":"8cf50c363afb01a36afdebb54e237fa29dfda8d5fa288ad86e92556959d70417"} Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.862956 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.870613 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" event={"ID":"99111a7e-4213-48ad-aa8e-71205314a433","Type":"ContainerStarted","Data":"7c97326f99588918d7111eb3ebf447da260387e403d7356ae9dfeae1e641a476"} Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.870838 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.886907 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-x65hg" podStartSLOduration=4.759417737 podStartE2EDuration="34.886891157s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:58.756294954 +0000 UTC m=+1291.834252071" lastFinishedPulling="2025-12-10 00:52:28.883768364 +0000 UTC m=+1321.961725491" observedRunningTime="2025-12-10 00:52:30.881456261 +0000 UTC m=+1323.959413388" watchObservedRunningTime="2025-12-10 00:52:30.886891157 +0000 UTC m=+1323.964848264" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.890152 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-78f8948974-64fqx" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.894381 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-78f8948974-64fqx" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.904612 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz" event={"ID":"04af4b93-00cd-4e1a-ad1e-84b438ff9b5f","Type":"ContainerStarted","Data":"a77cfa9fed40c34733aa07d9b10e387b6f684ce729a17d6a3c8097b670acbd1a"} Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.905357 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.916775 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.918209 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz" event={"ID":"c88f7e0d-d880-42ce-96fd-a1d1ec7be33f","Type":"ContainerStarted","Data":"257583eab0c3fd45914fda6e458f2326ad8e331262641c69e0556a89dd72f24d"} 
Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.919185 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.923142 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.930604 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6" event={"ID":"6953ae1f-46db-410e-b79a-7eff9b687850","Type":"ContainerStarted","Data":"8f27f3278fd7d8dc2ec2a1f88d472c78ad831bc750b241b1f625ebbf3645ccb3"} Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.931504 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.938067 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.954002 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-77grd" event={"ID":"c818d687-5ceb-4124-9ca3-be82851aa092","Type":"ContainerStarted","Data":"81685bc6db55e81a59ca37aec44fb7a801c3ec538dd819e5b580b42c3a100d4e"} Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.970168 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx" event={"ID":"c74b9b64-5f7c-462a-85e9-a7eacaf2824e","Type":"ContainerStarted","Data":"410b5c8365c887d25013da6e827bae7c82dda85d7b0771ecc6ae1f67ae8cae9b"} Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.971002 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.980141 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" podStartSLOduration=4.385076734 podStartE2EDuration="33.980118139s" podCreationTimestamp="2025-12-10 00:51:57 +0000 UTC" firstStartedPulling="2025-12-10 00:51:59.099815419 +0000 UTC m=+1292.177772536" lastFinishedPulling="2025-12-10 00:52:28.694856824 +0000 UTC m=+1321.772813941" observedRunningTime="2025-12-10 00:52:30.969864844 +0000 UTC m=+1324.047821971" watchObservedRunningTime="2025-12-10 00:52:30.980118139 +0000 UTC m=+1324.058075266" Dec 10 00:52:30 crc kubenswrapper[4884]: I1210 00:52:30.988298 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.003817 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" event={"ID":"07197f67-1348-456f-a655-d3a418542e85","Type":"ContainerStarted","Data":"bd94ade2d82c176f876ba50cae2af6712149ef3caa4020cae866b6b757233df2"} Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.003868 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" 
event={"ID":"07197f67-1348-456f-a655-d3a418542e85","Type":"ContainerStarted","Data":"c54b761dba3108a1bef1d6be8e2b0a3fcc019d9291daa138dbc7ce107690c680"} Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.004595 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" podStartSLOduration=5.404888132 podStartE2EDuration="35.004577749s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:59.090687906 +0000 UTC m=+1292.168645023" lastFinishedPulling="2025-12-10 00:52:28.690377523 +0000 UTC m=+1321.768334640" observedRunningTime="2025-12-10 00:52:31.00313594 +0000 UTC m=+1324.081093067" watchObservedRunningTime="2025-12-10 00:52:31.004577749 +0000 UTC m=+1324.082534876" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.006574 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.017588 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" event={"ID":"4309c558-54b8-4034-85db-c4ca159600ad","Type":"ContainerStarted","Data":"cb66c738a8bcef48d4be13518a2980b1946a0f42d6d96e387e0d0a924e601afc"} Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.018287 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.042215 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-5697bb5779-84wnx" podStartSLOduration=4.645195512 podStartE2EDuration="35.042188763s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:58.532547187 +0000 UTC m=+1291.610504334" lastFinishedPulling="2025-12-10 00:52:28.929540468 +0000 UTC m=+1322.007497585" observedRunningTime="2025-12-10 00:52:31.024836774 +0000 UTC m=+1324.102793901" watchObservedRunningTime="2025-12-10 00:52:31.042188763 +0000 UTC m=+1324.120145910" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.043260 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" event={"ID":"4206f75a-b9be-4d84-806b-0dea3aab1823","Type":"ContainerStarted","Data":"4cf077d5db47be806c682181c22b4fae2b0cc7f7af6ea0fcb3aa2f99789508cb"} Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.044156 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.072966 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x" event={"ID":"9fd7b8b4-5bdd-4d3c-859e-78ba9cced0b6","Type":"ContainerStarted","Data":"2e82b8d70a75f784e844e3baa86f476e200d1f5f7e54a354908ed7e78d891e4f"} Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.077661 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-697fb699cf-gdkh6" podStartSLOduration=4.701138093 podStartE2EDuration="35.077640458s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:58.552879538 +0000 UTC 
m=+1291.630836655" lastFinishedPulling="2025-12-10 00:52:28.929381883 +0000 UTC m=+1322.007339020" observedRunningTime="2025-12-10 00:52:31.063452385 +0000 UTC m=+1324.141409532" watchObservedRunningTime="2025-12-10 00:52:31.077640458 +0000 UTC m=+1324.155597575" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.078590 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.079819 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9" event={"ID":"4e506cfc-ad57-47e4-91ff-c4779cec4258","Type":"ContainerStarted","Data":"db49e32b45ae4046420106928452d667a287d029febad68a126215b10190ebef"} Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.084996 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.085492 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.092385 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r" event={"ID":"0fdff275-feeb-4244-8032-75d06452776a","Type":"ContainerStarted","Data":"021e115924dc084420db1d68818d26c3f31953c424151f7043383045b6da99b1"} Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.104064 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.138911 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb" event={"ID":"e9ad1d55-9cdf-42bd-89c4-47a4ad0150db","Type":"ContainerStarted","Data":"bca0631a3acbfb671cbd8c5e4c46f990217d32de35c8045b4f0ad3b17a4ec7d6"} Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.140618 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-78f8948974-64fqx" podStartSLOduration=4.999674381 podStartE2EDuration="35.140597045s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:58.788418328 +0000 UTC m=+1291.866375445" lastFinishedPulling="2025-12-10 00:52:28.929340982 +0000 UTC m=+1322.007298109" observedRunningTime="2025-12-10 00:52:31.104329667 +0000 UTC m=+1324.182286794" watchObservedRunningTime="2025-12-10 00:52:31.140597045 +0000 UTC m=+1324.218554162" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.143738 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.150396 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-9d58d64bc-k7vwz" podStartSLOduration=5.3043685289999996 podStartE2EDuration="35.150375428s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:59.07208211 +0000 UTC m=+1292.150039227" lastFinishedPulling="2025-12-10 00:52:28.918088999 +0000 UTC m=+1321.996046126" 
observedRunningTime="2025-12-10 00:52:31.138296953 +0000 UTC m=+1324.216254080" watchObservedRunningTime="2025-12-10 00:52:31.150375428 +0000 UTC m=+1324.228332545" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.152303 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" event={"ID":"effda10d-9f01-4d20-8f0b-23ff781864d2","Type":"ContainerStarted","Data":"ff347182e2e9a4d193048a4b6b31cff336fcf69ff9ded527d7c67c88976158d9"} Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.152363 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.153958 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.159325 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-w6tvz" podStartSLOduration=4.918052333 podStartE2EDuration="35.159307538s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:58.535063814 +0000 UTC m=+1291.613020931" lastFinishedPulling="2025-12-10 00:52:28.776319019 +0000 UTC m=+1321.854276136" observedRunningTime="2025-12-10 00:52:31.158885627 +0000 UTC m=+1324.236842754" watchObservedRunningTime="2025-12-10 00:52:31.159307538 +0000 UTC m=+1324.237264655" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.181218 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" event={"ID":"25ce6a20-18da-43c1-b3e4-70b2ca9185e0","Type":"ContainerStarted","Data":"d73af46d8a213ec2b6a7ff8f53735747ed179f3f9c51cae6040932c671f4a03e"} Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.182002 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.221909 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" podStartSLOduration=5.5619799709999995 podStartE2EDuration="35.221884975s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:59.104334579 +0000 UTC m=+1292.182291706" lastFinishedPulling="2025-12-10 00:52:28.764239563 +0000 UTC m=+1321.842196710" observedRunningTime="2025-12-10 00:52:31.185094184 +0000 UTC m=+1324.263051311" watchObservedRunningTime="2025-12-10 00:52:31.221884975 +0000 UTC m=+1324.299842092" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.225702 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" event={"ID":"3607d45b-2333-4034-aa0b-830759f88204","Type":"ContainerStarted","Data":"080b17bb09cea65fcf8eef6c3da0246f4725557b608c88dd29c198a5936b0c42"} Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.225744 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" event={"ID":"3607d45b-2333-4034-aa0b-830759f88204","Type":"ContainerStarted","Data":"a40551ab5ebc22b43a4d30b53de2823be6a320438875037ae05e735181e95cba"} Dec 10 
00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.226443 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.233154 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-t95cb" podStartSLOduration=5.230212462 podStartE2EDuration="35.233140909s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:58.793800602 +0000 UTC m=+1291.871757719" lastFinishedPulling="2025-12-10 00:52:28.796729019 +0000 UTC m=+1321.874686166" observedRunningTime="2025-12-10 00:52:31.226731195 +0000 UTC m=+1324.304688322" watchObservedRunningTime="2025-12-10 00:52:31.233140909 +0000 UTC m=+1324.311098026" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.251921 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" podStartSLOduration=5.317303287 podStartE2EDuration="35.251905194s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:58.829551124 +0000 UTC m=+1291.907508241" lastFinishedPulling="2025-12-10 00:52:28.764153031 +0000 UTC m=+1321.842110148" observedRunningTime="2025-12-10 00:52:31.244605578 +0000 UTC m=+1324.322562715" watchObservedRunningTime="2025-12-10 00:52:31.251905194 +0000 UTC m=+1324.329862311" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.301417 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-wwrh9" podStartSLOduration=4.959728864 podStartE2EDuration="35.301399588s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:58.540337993 +0000 UTC m=+1291.618295110" lastFinishedPulling="2025-12-10 00:52:28.882008717 +0000 UTC m=+1321.959965834" observedRunningTime="2025-12-10 00:52:31.265577892 +0000 UTC m=+1324.343535009" watchObservedRunningTime="2025-12-10 00:52:31.301399588 +0000 UTC m=+1324.379356705" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.304983 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h2m6r" podStartSLOduration=4.530759877 podStartE2EDuration="34.304969834s" podCreationTimestamp="2025-12-10 00:51:57 +0000 UTC" firstStartedPulling="2025-12-10 00:51:59.112937668 +0000 UTC m=+1292.190894795" lastFinishedPulling="2025-12-10 00:52:28.887147645 +0000 UTC m=+1321.965104752" observedRunningTime="2025-12-10 00:52:31.294689797 +0000 UTC m=+1324.372646914" watchObservedRunningTime="2025-12-10 00:52:31.304969834 +0000 UTC m=+1324.382926951" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.374392 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-79c8c4686c-gb69x" podStartSLOduration=5.203217672 podStartE2EDuration="35.374377575s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:58.770833141 +0000 UTC m=+1291.848790258" lastFinishedPulling="2025-12-10 00:52:28.941993044 +0000 UTC m=+1322.019950161" observedRunningTime="2025-12-10 00:52:31.32078918 +0000 UTC m=+1324.398746307" watchObservedRunningTime="2025-12-10 00:52:31.374377575 +0000 UTC m=+1324.452334692" Dec 10 00:52:31 crc 
kubenswrapper[4884]: I1210 00:52:31.376646 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" podStartSLOduration=34.376638776 podStartE2EDuration="34.376638776s" podCreationTimestamp="2025-12-10 00:51:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:52:31.367302374 +0000 UTC m=+1324.445259511" watchObservedRunningTime="2025-12-10 00:52:31.376638776 +0000 UTC m=+1324.454595893" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.397537 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" podStartSLOduration=26.35766313 podStartE2EDuration="35.397521748s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:52:19.656121406 +0000 UTC m=+1312.734078523" lastFinishedPulling="2025-12-10 00:52:28.695980024 +0000 UTC m=+1321.773937141" observedRunningTime="2025-12-10 00:52:31.392219205 +0000 UTC m=+1324.470176342" watchObservedRunningTime="2025-12-10 00:52:31.397521748 +0000 UTC m=+1324.475478865" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.482053 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" podStartSLOduration=26.387264638 podStartE2EDuration="35.482032916s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:52:19.684621774 +0000 UTC m=+1312.762578891" lastFinishedPulling="2025-12-10 00:52:28.779390052 +0000 UTC m=+1321.857347169" observedRunningTime="2025-12-10 00:52:31.466691922 +0000 UTC m=+1324.544649039" watchObservedRunningTime="2025-12-10 00:52:31.482032916 +0000 UTC m=+1324.559990033" Dec 10 00:52:31 crc kubenswrapper[4884]: I1210 00:52:31.494138 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" podStartSLOduration=5.910531544 podStartE2EDuration="35.494122862s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:59.106057935 +0000 UTC m=+1292.184015052" lastFinishedPulling="2025-12-10 00:52:28.689649253 +0000 UTC m=+1321.767606370" observedRunningTime="2025-12-10 00:52:31.484836992 +0000 UTC m=+1324.562794129" watchObservedRunningTime="2025-12-10 00:52:31.494122862 +0000 UTC m=+1324.572079979" Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.234630 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" event={"ID":"9295a029-0033-437d-a315-0549c7dc31aa","Type":"ContainerStarted","Data":"1b83c9fd3c0c27c15908b1cee3247a5752c06d0d32ed1b40a68836cbed1ed3fd"} Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.235128 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.237456 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr" event={"ID":"203fedfb-ba93-4cb0-afd5-a01607b4f40d","Type":"ContainerStarted","Data":"2d368f529d1a92ae1726eb1837f3e038c7ec841546edc3329d11e62a22bb740a"} Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.239170 4884 
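
In the openstack-operator-controller-manager record above, firstStartedPulling and lastFinishedPulling are Go's zero time (0001-01-01 00:00:00 +0000 UTC) and podStartSLOduration equals podStartE2EDuration exactly, which is consistent with no image pull having been observed for that pod. A guard of roughly this shape would produce that behavior (an assumption about the tracker, not verified against kubelet source):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        var firstPull, lastPull time.Time // zero values: no pull was observed for this pod
        e2e, _ := time.ParseDuration("34.376638776s")

        // Hypothetical guard: only subtract a pull window when one was recorded.
        pull := time.Duration(0)
        if !firstPull.IsZero() && !lastPull.IsZero() {
            pull = lastPull.Sub(firstPull)
        }
        fmt.Println(e2e - pull) // 34.376638776s: SLO == E2E when nothing was pulled
    }
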
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-77grd" event={"ID":"c818d687-5ceb-4124-9ca3-be82851aa092","Type":"ContainerStarted","Data":"b78e9afe49b53738dddb1c040ec806e67a63f08aec5966bef20a0650d14dcee9"} Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.239266 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-77grd" Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.240964 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8" event={"ID":"c355d3e8-66ee-46b5-8979-d94efb631d6a","Type":"ContainerStarted","Data":"b4f7969be2283d3fdfb2b0f42a688b38d51fc37d605051d7f60017ca95c68495"} Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.241185 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8" Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.242627 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" event={"ID":"effda10d-9f01-4d20-8f0b-23ff781864d2","Type":"ContainerStarted","Data":"0313c71ea65a70b14ef256fe59af6e042f26f339058fa6130001705d387bf938"} Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.243699 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8" Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.245011 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-64fqx" event={"ID":"d5ce6700-aa2a-4a81-a4db-214dc5cd0305","Type":"ContainerStarted","Data":"8c83ce0722e9015a1b111eb7a769b4ad1a68b613739bfe00941d829ca501b677"} Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.249278 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k" event={"ID":"53fab550-e4ee-4601-bc21-e93b60ee3788","Type":"ContainerStarted","Data":"14a608d1aa731b0b54aced7888640d452abcf5133e234d8a5d93babbbd6c4641"} Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.249349 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k" Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.256662 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" podStartSLOduration=7.090934177 podStartE2EDuration="36.256647321s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:59.094816255 +0000 UTC m=+1292.172773372" lastFinishedPulling="2025-12-10 00:52:28.260529369 +0000 UTC m=+1321.338486516" observedRunningTime="2025-12-10 00:52:32.251910873 +0000 UTC m=+1325.329868010" watchObservedRunningTime="2025-12-10 00:52:32.256647321 +0000 UTC m=+1325.334604438" Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.273555 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr" podStartSLOduration=4.583717581 podStartE2EDuration="36.273539736s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 
00:51:58.756821788 +0000 UTC m=+1291.834778905" lastFinishedPulling="2025-12-10 00:52:30.446643943 +0000 UTC m=+1323.524601060" observedRunningTime="2025-12-10 00:52:32.26659352 +0000 UTC m=+1325.344550647" watchObservedRunningTime="2025-12-10 00:52:32.273539736 +0000 UTC m=+1325.351496853" Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.293669 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-77grd" podStartSLOduration=4.593195236 podStartE2EDuration="36.293653078s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:58.756625442 +0000 UTC m=+1291.834582549" lastFinishedPulling="2025-12-10 00:52:30.457083274 +0000 UTC m=+1323.535040391" observedRunningTime="2025-12-10 00:52:32.28855032 +0000 UTC m=+1325.366507437" watchObservedRunningTime="2025-12-10 00:52:32.293653078 +0000 UTC m=+1325.371610195" Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.306325 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k" podStartSLOduration=3.71638991 podStartE2EDuration="36.30630957s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:58.808315978 +0000 UTC m=+1291.886273095" lastFinishedPulling="2025-12-10 00:52:31.398235648 +0000 UTC m=+1324.476192755" observedRunningTime="2025-12-10 00:52:32.305616401 +0000 UTC m=+1325.383573518" watchObservedRunningTime="2025-12-10 00:52:32.30630957 +0000 UTC m=+1325.384266677" Dec 10 00:52:32 crc kubenswrapper[4884]: I1210 00:52:32.329750 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6c677c69b-twll8" podStartSLOduration=5.259020981 podStartE2EDuration="36.329735431s" podCreationTimestamp="2025-12-10 00:51:56 +0000 UTC" firstStartedPulling="2025-12-10 00:51:57.834840072 +0000 UTC m=+1290.912797189" lastFinishedPulling="2025-12-10 00:52:28.905554522 +0000 UTC m=+1321.983511639" observedRunningTime="2025-12-10 00:52:32.32115928 +0000 UTC m=+1325.399116407" watchObservedRunningTime="2025-12-10 00:52:32.329735431 +0000 UTC m=+1325.407692548" Dec 10 00:52:33 crc kubenswrapper[4884]: I1210 00:52:33.260923 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr" Dec 10 00:52:36 crc kubenswrapper[4884]: I1210 00:52:36.953375 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-w9xfr" Dec 10 00:52:37 crc kubenswrapper[4884]: I1210 00:52:37.054968 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-967d97867-5zl6k" Dec 10 00:52:37 crc kubenswrapper[4884]: I1210 00:52:37.231463 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5b5fd79c9c-j554b" Dec 10 00:52:37 crc kubenswrapper[4884]: I1210 00:52:37.306964 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-4f28g" Dec 10 00:52:37 crc kubenswrapper[4884]: I1210 00:52:37.381125 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-77grd" Dec 10 
00:52:37 crc kubenswrapper[4884]: I1210 00:52:37.474354 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-998648c74-s5dkz" Dec 10 00:52:37 crc kubenswrapper[4884]: I1210 00:52:37.598007 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-6fcddf47c-zdm75" Dec 10 00:52:37 crc kubenswrapper[4884]: I1210 00:52:37.922959 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-667bd8d554-cwql6" Dec 10 00:52:37 crc kubenswrapper[4884]: I1210 00:52:37.935670 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5854674fcc-vrrck" Dec 10 00:52:39 crc kubenswrapper[4884]: I1210 00:52:39.777318 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-6d5bb94f9c-zvfmc" Dec 10 00:52:42 crc kubenswrapper[4884]: I1210 00:52:42.927926 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-78d48bff9d-gxgfp" Dec 10 00:52:43 crc kubenswrapper[4884]: I1210 00:52:43.365131 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-84b575879fxkb8j" Dec 10 00:52:48 crc kubenswrapper[4884]: I1210 00:52:48.097984 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:52:48 crc kubenswrapper[4884]: I1210 00:52:48.098533 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:52:48 crc kubenswrapper[4884]: I1210 00:52:48.098583 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:52:48 crc kubenswrapper[4884]: I1210 00:52:48.099502 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c301699d2f400451cf0d2a7c3e824313cd2be40d6ee80bb600b0e9f8df69938a"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 00:52:48 crc kubenswrapper[4884]: I1210 00:52:48.099561 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://c301699d2f400451cf0d2a7c3e824313cd2be40d6ee80bb600b0e9f8df69938a" gracePeriod=600 Dec 10 00:52:49 crc kubenswrapper[4884]: I1210 00:52:49.429953 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="c301699d2f400451cf0d2a7c3e824313cd2be40d6ee80bb600b0e9f8df69938a" exitCode=0 Dec 
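
The machine-config-daemon sequence above is the full liveness-failure cycle: the prober's GET against http://127.0.0.1:8798/health is refused, the probe is marked unhealthy, the container is killed with its grace period (600s here), PLEG reports ContainerDied, the previous dead instance is garbage-collected via RemoveContainer, and a fresh container starts. A minimal sketch of the observable probe rule, assuming the usual HTTP-probe convention that a connection error or a status outside 200-399 counts as failure (the real implementation lives in kubelet's probe packages):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probe performs one HTTP liveness check in the style logged above.
    func probe(url string) error {
        client := &http.Client{Timeout: 1 * time.Second}
        resp, err := client.Get(url)
        if err != nil {
            return err // e.g. "connect: connection refused", as in the log
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("unhealthy: status %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        if err := probe("http://127.0.0.1:8798/health"); err != nil {
            // Enough consecutive failures and kubelet kills and restarts the container.
            fmt.Println("Liveness probe failed:", err)
        }
    }
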
10 00:52:49 crc kubenswrapper[4884]: I1210 00:52:49.430022 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"c301699d2f400451cf0d2a7c3e824313cd2be40d6ee80bb600b0e9f8df69938a"} Dec 10 00:52:49 crc kubenswrapper[4884]: I1210 00:52:49.430142 4884 scope.go:117] "RemoveContainer" containerID="85d53d23856c70f8c66a5944018254f82db2773d6bfbe9b53696ef024e9cffda" Dec 10 00:52:50 crc kubenswrapper[4884]: I1210 00:52:50.441633 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957"} Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.564624 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rbv6d"] Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.566404 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.568208 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.570754 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.571100 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-5k97p" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.571261 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rbv6d"] Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.571272 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.623130 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-2cdg2"] Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.625996 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.630719 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.646651 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-2cdg2"] Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.688677 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stsjj\" (UniqueName: \"kubernetes.io/projected/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1-kube-api-access-stsjj\") pod \"dnsmasq-dns-675f4bcbfc-rbv6d\" (UID: \"14a4ef5f-d8db-4d68-b124-0e38dca5a0f1\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.688946 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1-config\") pod \"dnsmasq-dns-675f4bcbfc-rbv6d\" (UID: \"14a4ef5f-d8db-4d68-b124-0e38dca5a0f1\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.790744 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1-config\") pod \"dnsmasq-dns-675f4bcbfc-rbv6d\" (UID: \"14a4ef5f-d8db-4d68-b124-0e38dca5a0f1\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.790881 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gw6q\" (UniqueName: \"kubernetes.io/projected/eb951b03-9465-4548-a8e1-ceeb8d50494f-kube-api-access-2gw6q\") pod \"dnsmasq-dns-78dd6ddcc-2cdg2\" (UID: \"eb951b03-9465-4548-a8e1-ceeb8d50494f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.790988 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb951b03-9465-4548-a8e1-ceeb8d50494f-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-2cdg2\" (UID: \"eb951b03-9465-4548-a8e1-ceeb8d50494f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.791018 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stsjj\" (UniqueName: \"kubernetes.io/projected/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1-kube-api-access-stsjj\") pod \"dnsmasq-dns-675f4bcbfc-rbv6d\" (UID: \"14a4ef5f-d8db-4d68-b124-0e38dca5a0f1\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.791133 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb951b03-9465-4548-a8e1-ceeb8d50494f-config\") pod \"dnsmasq-dns-78dd6ddcc-2cdg2\" (UID: \"eb951b03-9465-4548-a8e1-ceeb8d50494f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.791942 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1-config\") pod \"dnsmasq-dns-675f4bcbfc-rbv6d\" (UID: \"14a4ef5f-d8db-4d68-b124-0e38dca5a0f1\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d" Dec 10 
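
The interleaved SyncLoop ADD / SyncLoop UPDATE / SyncLoop (PLEG) / SyncLoop (probe) lines all come from one dispatch point: kubelet's sync loop selects over a pod-config channel fed from the API server, the PLEG event channel, and probe-result channels. A toy Go sketch of that shape; the channel and type names are illustrative, not kubelet's:

    package main

    import "fmt"

    type podUpdate struct {
        op   string // "ADD", "UPDATE", "DELETE"
        pods []string
    }

    type plegEvent struct {
        pod, data string // e.g. pod name plus "ContainerStarted <id>"
    }

    // syncLoop dispatches whichever event source is ready, like the
    // "SyncLoop ..." lines above.
    func syncLoop(configCh <-chan podUpdate, plegCh <-chan plegEvent, probeCh <-chan string) {
        for {
            select {
            case u, ok := <-configCh:
                if !ok {
                    return
                }
                fmt.Printf("SyncLoop %s source=api pods=%v\n", u.op, u.pods)
            case e := <-plegCh:
                fmt.Printf("SyncLoop (PLEG): event for pod %s: %s\n", e.pod, e.data)
            case p := <-probeCh:
                fmt.Printf("SyncLoop (probe) pod=%s\n", p)
            }
        }
    }

    func main() {
        configCh := make(chan podUpdate, 1)
        plegCh := make(chan plegEvent, 1)
        probeCh := make(chan string, 1)
        configCh <- podUpdate{op: "ADD", pods: []string{"openstack/dnsmasq-dns-675f4bcbfc-rbv6d"}}
        close(configCh)
        syncLoop(configCh, plegCh, probeCh)
    }
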
00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.818139 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stsjj\" (UniqueName: \"kubernetes.io/projected/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1-kube-api-access-stsjj\") pod \"dnsmasq-dns-675f4bcbfc-rbv6d\" (UID: \"14a4ef5f-d8db-4d68-b124-0e38dca5a0f1\") " pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.885596 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.893461 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb951b03-9465-4548-a8e1-ceeb8d50494f-config\") pod \"dnsmasq-dns-78dd6ddcc-2cdg2\" (UID: \"eb951b03-9465-4548-a8e1-ceeb8d50494f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.893591 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gw6q\" (UniqueName: \"kubernetes.io/projected/eb951b03-9465-4548-a8e1-ceeb8d50494f-kube-api-access-2gw6q\") pod \"dnsmasq-dns-78dd6ddcc-2cdg2\" (UID: \"eb951b03-9465-4548-a8e1-ceeb8d50494f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.893646 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb951b03-9465-4548-a8e1-ceeb8d50494f-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-2cdg2\" (UID: \"eb951b03-9465-4548-a8e1-ceeb8d50494f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.894745 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb951b03-9465-4548-a8e1-ceeb8d50494f-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-2cdg2\" (UID: \"eb951b03-9465-4548-a8e1-ceeb8d50494f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.894741 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb951b03-9465-4548-a8e1-ceeb8d50494f-config\") pod \"dnsmasq-dns-78dd6ddcc-2cdg2\" (UID: \"eb951b03-9465-4548-a8e1-ceeb8d50494f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.914227 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gw6q\" (UniqueName: \"kubernetes.io/projected/eb951b03-9465-4548-a8e1-ceeb8d50494f-kube-api-access-2gw6q\") pod \"dnsmasq-dns-78dd6ddcc-2cdg2\" (UID: \"eb951b03-9465-4548-a8e1-ceeb8d50494f\") " pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" Dec 10 00:53:06 crc kubenswrapper[4884]: I1210 00:53:06.951556 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" Dec 10 00:53:07 crc kubenswrapper[4884]: I1210 00:53:07.348358 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rbv6d"] Dec 10 00:53:07 crc kubenswrapper[4884]: I1210 00:53:07.469816 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-2cdg2"] Dec 10 00:53:07 crc kubenswrapper[4884]: I1210 00:53:07.617487 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" event={"ID":"eb951b03-9465-4548-a8e1-ceeb8d50494f","Type":"ContainerStarted","Data":"f673897b1e103d131d7ff20b76b4a9bbe48f40e99c3f9321d7996d6a793be993"} Dec 10 00:53:07 crc kubenswrapper[4884]: I1210 00:53:07.618633 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d" event={"ID":"14a4ef5f-d8db-4d68-b124-0e38dca5a0f1","Type":"ContainerStarted","Data":"6726ede1a4059ef62d3d59a8714cd1e04e40394b934a6bd74a6130231468c1ea"} Dec 10 00:53:09 crc kubenswrapper[4884]: I1210 00:53:09.876418 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rbv6d"] Dec 10 00:53:09 crc kubenswrapper[4884]: I1210 00:53:09.932139 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-pptlw"] Dec 10 00:53:09 crc kubenswrapper[4884]: I1210 00:53:09.941929 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" Dec 10 00:53:09 crc kubenswrapper[4884]: I1210 00:53:09.957716 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-pptlw"] Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.051729 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb8dr\" (UniqueName: \"kubernetes.io/projected/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-kube-api-access-qb8dr\") pod \"dnsmasq-dns-5ccc8479f9-pptlw\" (UID: \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.051798 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-config\") pod \"dnsmasq-dns-5ccc8479f9-pptlw\" (UID: \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.051837 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-pptlw\" (UID: \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.156404 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-pptlw\" (UID: \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.156839 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb8dr\" (UniqueName: \"kubernetes.io/projected/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-kube-api-access-qb8dr\") 
pod \"dnsmasq-dns-5ccc8479f9-pptlw\" (UID: \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.156971 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-config\") pod \"dnsmasq-dns-5ccc8479f9-pptlw\" (UID: \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.158089 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-config\") pod \"dnsmasq-dns-5ccc8479f9-pptlw\" (UID: \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.158894 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-pptlw\" (UID: \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.235466 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb8dr\" (UniqueName: \"kubernetes.io/projected/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-kube-api-access-qb8dr\") pod \"dnsmasq-dns-5ccc8479f9-pptlw\" (UID: \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\") " pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.281768 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.389714 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-2cdg2"] Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.398356 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-m2f9c"] Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.400655 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.411560 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-m2f9c"] Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.563020 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2b91caf-ce98-4997-9f34-62031f4fb1f3-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-m2f9c\" (UID: \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\") " pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.563079 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2b91caf-ce98-4997-9f34-62031f4fb1f3-config\") pod \"dnsmasq-dns-57d769cc4f-m2f9c\" (UID: \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\") " pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.563159 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vjbm\" (UniqueName: \"kubernetes.io/projected/a2b91caf-ce98-4997-9f34-62031f4fb1f3-kube-api-access-9vjbm\") pod \"dnsmasq-dns-57d769cc4f-m2f9c\" (UID: \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\") " pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.664687 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2b91caf-ce98-4997-9f34-62031f4fb1f3-config\") pod \"dnsmasq-dns-57d769cc4f-m2f9c\" (UID: \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\") " pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.664773 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vjbm\" (UniqueName: \"kubernetes.io/projected/a2b91caf-ce98-4997-9f34-62031f4fb1f3-kube-api-access-9vjbm\") pod \"dnsmasq-dns-57d769cc4f-m2f9c\" (UID: \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\") " pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.664856 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2b91caf-ce98-4997-9f34-62031f4fb1f3-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-m2f9c\" (UID: \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\") " pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.665685 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2b91caf-ce98-4997-9f34-62031f4fb1f3-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-m2f9c\" (UID: \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\") " pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.666283 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2b91caf-ce98-4997-9f34-62031f4fb1f3-config\") pod \"dnsmasq-dns-57d769cc4f-m2f9c\" (UID: \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\") " pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.703464 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vjbm\" (UniqueName: 
\"kubernetes.io/projected/a2b91caf-ce98-4997-9f34-62031f4fb1f3-kube-api-access-9vjbm\") pod \"dnsmasq-dns-57d769cc4f-m2f9c\" (UID: \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\") " pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.748299 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:10 crc kubenswrapper[4884]: I1210 00:53:10.880855 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-pptlw"] Dec 10 00:53:10 crc kubenswrapper[4884]: W1210 00:53:10.890829 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9bc12a3c_e2b7_45af_a19d_97fb5a4f7b2a.slice/crio-bda4ea7effc483706cc04a14f1f74fc9895d679860ab8c4ad47356c03ae841d1 WatchSource:0}: Error finding container bda4ea7effc483706cc04a14f1f74fc9895d679860ab8c4ad47356c03ae841d1: Status 404 returned error can't find the container with id bda4ea7effc483706cc04a14f1f74fc9895d679860ab8c4ad47356c03ae841d1 Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.087618 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.089577 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.099855 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.100067 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.100184 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-9czbh" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.100241 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.100297 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.100547 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.100705 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.111365 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.178598 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-m2f9c"] Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.274617 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.274674 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.274698 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.274937 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.275052 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.275092 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkvdf\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-kube-api-access-dkvdf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.275125 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.275289 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.275327 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.275366 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.275426 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.376657 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.376700 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.376725 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.376758 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.376786 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.376813 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.376833 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.376874 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.376908 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-confd\") 
pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.376928 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkvdf\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-kube-api-access-dkvdf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.376986 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.377141 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.377559 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.377582 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.378104 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.378497 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.381932 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.382392 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 
00:53:11.382833 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.391546 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.395797 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.401521 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkvdf\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-kube-api-access-dkvdf\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.406951 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.414476 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.492422 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.497679 4884 util.go:30] "No sandbox for pod can be found. 
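
The local-storage08-crc lines above show the two-stage mount used for a local PV: MountVolume.MountDevice establishes the global device mount path (/mnt/openstack/pv08), and MountVolume.SetUp then exposes it inside the pod's volume directory, which for a local volume amounts to a bind mount. A rough sketch of the SetUp half, assuming the conventional per-pod volume path layout (Linux only, needs root; paths built from the rabbitmq-cell1-server-0 UID above for illustration):

    package main

    import (
        "fmt"
        "os"
        "syscall"
    )

    // setUpLocalVolume bind-mounts an already-mounted device path into the
    // pod's volume directory, the observable effect of MountVolume.SetUp
    // for a local volume.
    func setUpLocalVolume(devicePath, podVolumeDir string) error {
        if err := os.MkdirAll(podVolumeDir, 0o750); err != nil {
            return err
        }
        // MS_BIND makes podVolumeDir an alias of devicePath.
        return syscall.Mount(devicePath, podVolumeDir, "", syscall.MS_BIND, "")
    }

    func main() {
        err := setUpLocalVolume("/mnt/openstack/pv08",
            "/var/lib/kubelet/pods/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93/volumes/kubernetes.io~local-volume/local-storage08-crc")
        if err != nil {
            fmt.Println("setup failed:", err)
        }
    }
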
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.501229 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.502194 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.502489 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.502822 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.502990 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.503229 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.503402 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-88p9q" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.511648 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.585958 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.586015 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-config-data\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.586039 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.586071 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.586094 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.586121 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.586149 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7343006c-fda9-4e2d-8767-41ee4412c601-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.586177 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28w4d\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-kube-api-access-28w4d\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.586197 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7343006c-fda9-4e2d-8767-41ee4412c601-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.586227 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.586279 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.661893 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" event={"ID":"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a","Type":"ContainerStarted","Data":"bda4ea7effc483706cc04a14f1f74fc9895d679860ab8c4ad47356c03ae841d1"} Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.687331 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-config-data\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.687376 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.687412 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: 
I1210 00:53:11.687464 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.687488 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.687509 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7343006c-fda9-4e2d-8767-41ee4412c601-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.687534 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28w4d\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-kube-api-access-28w4d\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.687552 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7343006c-fda9-4e2d-8767-41ee4412c601-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.687580 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.689442 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.689651 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.689693 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.690071 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-plugins-conf\") pod \"rabbitmq-server-0\" (UID: 
\"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.690689 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.693142 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-config-data\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.702072 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7343006c-fda9-4e2d-8767-41ee4412c601-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.702459 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.703511 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.704133 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.705534 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.705669 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7343006c-fda9-4e2d-8767-41ee4412c601-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.705879 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28w4d\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-kube-api-access-28w4d\") pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.730513 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") 
pod \"rabbitmq-server-0\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " pod="openstack/rabbitmq-server-0" Dec 10 00:53:11 crc kubenswrapper[4884]: I1210 00:53:11.855491 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.698571 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.702590 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.706997 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-b8kjb" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.707554 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.708291 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.715127 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.726680 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.753686 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.819357 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.819473 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/30c1a0f0-5abf-4fac-89c9-afedea695fab-config-data-generated\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.819533 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30c1a0f0-5abf-4fac-89c9-afedea695fab-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.819621 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/30c1a0f0-5abf-4fac-89c9-afedea695fab-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.819658 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/30c1a0f0-5abf-4fac-89c9-afedea695fab-kolla-config\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 
crc kubenswrapper[4884]: I1210 00:53:12.819752 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30c1a0f0-5abf-4fac-89c9-afedea695fab-operator-scripts\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.819781 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6dbh\" (UniqueName: \"kubernetes.io/projected/30c1a0f0-5abf-4fac-89c9-afedea695fab-kube-api-access-c6dbh\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.819827 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/30c1a0f0-5abf-4fac-89c9-afedea695fab-config-data-default\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.921088 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30c1a0f0-5abf-4fac-89c9-afedea695fab-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.921199 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/30c1a0f0-5abf-4fac-89c9-afedea695fab-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.921249 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/30c1a0f0-5abf-4fac-89c9-afedea695fab-kolla-config\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.921302 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30c1a0f0-5abf-4fac-89c9-afedea695fab-operator-scripts\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.921334 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6dbh\" (UniqueName: \"kubernetes.io/projected/30c1a0f0-5abf-4fac-89c9-afedea695fab-kube-api-access-c6dbh\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.921381 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/30c1a0f0-5abf-4fac-89c9-afedea695fab-config-data-default\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.921488 4884 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.921547 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/30c1a0f0-5abf-4fac-89c9-afedea695fab-config-data-generated\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.922112 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.922125 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/30c1a0f0-5abf-4fac-89c9-afedea695fab-config-data-generated\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.922846 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/30c1a0f0-5abf-4fac-89c9-afedea695fab-config-data-default\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.923056 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/30c1a0f0-5abf-4fac-89c9-afedea695fab-kolla-config\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.924383 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30c1a0f0-5abf-4fac-89c9-afedea695fab-operator-scripts\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.928337 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/30c1a0f0-5abf-4fac-89c9-afedea695fab-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.931581 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30c1a0f0-5abf-4fac-89c9-afedea695fab-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.949091 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6dbh\" (UniqueName: \"kubernetes.io/projected/30c1a0f0-5abf-4fac-89c9-afedea695fab-kube-api-access-c6dbh\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 
10 00:53:12 crc kubenswrapper[4884]: I1210 00:53:12.955636 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"openstack-galera-0\" (UID: \"30c1a0f0-5abf-4fac-89c9-afedea695fab\") " pod="openstack/openstack-galera-0" Dec 10 00:53:13 crc kubenswrapper[4884]: I1210 00:53:13.041876 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.091644 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.093623 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.099303 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.099617 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.099744 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-k989c" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.101011 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.106883 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.244022 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.244078 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d089c5ef-75b6-480e-b726-abd349a291cc-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.244120 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d089c5ef-75b6-480e-b726-abd349a291cc-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.244148 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d089c5ef-75b6-480e-b726-abd349a291cc-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.244214 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/d089c5ef-75b6-480e-b726-abd349a291cc-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.244294 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d089c5ef-75b6-480e-b726-abd349a291cc-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.244416 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d089c5ef-75b6-480e-b726-abd349a291cc-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.244486 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6556\" (UniqueName: \"kubernetes.io/projected/d089c5ef-75b6-480e-b726-abd349a291cc-kube-api-access-m6556\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.311609 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.312939 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.315071 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.315418 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-trp2t" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.315474 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.323864 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.347387 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d089c5ef-75b6-480e-b726-abd349a291cc-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.347515 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d089c5ef-75b6-480e-b726-abd349a291cc-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.347554 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d089c5ef-75b6-480e-b726-abd349a291cc-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " 
pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.347585 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6556\" (UniqueName: \"kubernetes.io/projected/d089c5ef-75b6-480e-b726-abd349a291cc-kube-api-access-m6556\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.347628 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.347655 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d089c5ef-75b6-480e-b726-abd349a291cc-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.347692 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d089c5ef-75b6-480e-b726-abd349a291cc-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.347728 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d089c5ef-75b6-480e-b726-abd349a291cc-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.348460 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d089c5ef-75b6-480e-b726-abd349a291cc-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.348656 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.348707 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d089c5ef-75b6-480e-b726-abd349a291cc-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.349193 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d089c5ef-75b6-480e-b726-abd349a291cc-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.350980 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d089c5ef-75b6-480e-b726-abd349a291cc-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.362494 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d089c5ef-75b6-480e-b726-abd349a291cc-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.372874 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d089c5ef-75b6-480e-b726-abd349a291cc-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.375399 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.392001 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6556\" (UniqueName: \"kubernetes.io/projected/d089c5ef-75b6-480e-b726-abd349a291cc-kube-api-access-m6556\") pod \"openstack-cell1-galera-0\" (UID: \"d089c5ef-75b6-480e-b726-abd349a291cc\") " pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: W1210 00:53:14.401610 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2b91caf_ce98_4997_9f34_62031f4fb1f3.slice/crio-6463d3ffa03ddefb78b154759f3108a9df5020edf81b9a53838e41f8cd5949ab WatchSource:0}: Error finding container 6463d3ffa03ddefb78b154759f3108a9df5020edf81b9a53838e41f8cd5949ab: Status 404 returned error can't find the container with id 6463d3ffa03ddefb78b154759f3108a9df5020edf81b9a53838e41f8cd5949ab Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.412110 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.449753 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-kolla-config\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.450111 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.450225 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-config-data\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.450304 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2czg\" (UniqueName: \"kubernetes.io/projected/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-kube-api-access-n2czg\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.450409 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.552215 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.552281 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-config-data\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.552318 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2czg\" (UniqueName: \"kubernetes.io/projected/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-kube-api-access-n2czg\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.552356 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.552388 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" 
(UniqueName: \"kubernetes.io/configmap/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-kolla-config\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.553800 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-kolla-config\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.553866 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-config-data\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.555330 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-memcached-tls-certs\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.556657 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-combined-ca-bundle\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.577344 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2czg\" (UniqueName: \"kubernetes.io/projected/fb3db35c-fdef-4e0a-81a3-7c13f3a20649-kube-api-access-n2czg\") pod \"memcached-0\" (UID: \"fb3db35c-fdef-4e0a-81a3-7c13f3a20649\") " pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.629648 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Dec 10 00:53:14 crc kubenswrapper[4884]: I1210 00:53:14.688139 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" event={"ID":"a2b91caf-ce98-4997-9f34-62031f4fb1f3","Type":"ContainerStarted","Data":"6463d3ffa03ddefb78b154759f3108a9df5020edf81b9a53838e41f8cd5949ab"} Dec 10 00:53:16 crc kubenswrapper[4884]: I1210 00:53:16.717679 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 00:53:16 crc kubenswrapper[4884]: I1210 00:53:16.720668 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 00:53:16 crc kubenswrapper[4884]: I1210 00:53:16.722555 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-trt9w" Dec 10 00:53:16 crc kubenswrapper[4884]: I1210 00:53:16.726063 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 00:53:16 crc kubenswrapper[4884]: I1210 00:53:16.799931 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdghx\" (UniqueName: \"kubernetes.io/projected/ad50acf2-d8ef-4a8c-a594-dedfc823cf9f-kube-api-access-bdghx\") pod \"kube-state-metrics-0\" (UID: \"ad50acf2-d8ef-4a8c-a594-dedfc823cf9f\") " pod="openstack/kube-state-metrics-0" Dec 10 00:53:16 crc kubenswrapper[4884]: I1210 00:53:16.901912 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdghx\" (UniqueName: \"kubernetes.io/projected/ad50acf2-d8ef-4a8c-a594-dedfc823cf9f-kube-api-access-bdghx\") pod \"kube-state-metrics-0\" (UID: \"ad50acf2-d8ef-4a8c-a594-dedfc823cf9f\") " pod="openstack/kube-state-metrics-0" Dec 10 00:53:16 crc kubenswrapper[4884]: I1210 00:53:16.919184 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdghx\" (UniqueName: \"kubernetes.io/projected/ad50acf2-d8ef-4a8c-a594-dedfc823cf9f-kube-api-access-bdghx\") pod \"kube-state-metrics-0\" (UID: \"ad50acf2-d8ef-4a8c-a594-dedfc823cf9f\") " pod="openstack/kube-state-metrics-0" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.073227 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.178245 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl"] Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.179326 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.186165 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards-sa-dockercfg-qd984" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.186194 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-ui-dashboards" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.225518 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl"] Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.316292 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txz6t\" (UniqueName: \"kubernetes.io/projected/7a2ff0cb-6085-4680-a330-aec1d9452896-kube-api-access-txz6t\") pod \"observability-ui-dashboards-7d5fb4cbfb-dlcnl\" (UID: \"7a2ff0cb-6085-4680-a330-aec1d9452896\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.316722 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a2ff0cb-6085-4680-a330-aec1d9452896-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-dlcnl\" (UID: \"7a2ff0cb-6085-4680-a330-aec1d9452896\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.420387 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txz6t\" (UniqueName: \"kubernetes.io/projected/7a2ff0cb-6085-4680-a330-aec1d9452896-kube-api-access-txz6t\") pod \"observability-ui-dashboards-7d5fb4cbfb-dlcnl\" (UID: \"7a2ff0cb-6085-4680-a330-aec1d9452896\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.420495 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a2ff0cb-6085-4680-a330-aec1d9452896-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-dlcnl\" (UID: \"7a2ff0cb-6085-4680-a330-aec1d9452896\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl" Dec 10 00:53:17 crc kubenswrapper[4884]: E1210 00:53:17.421921 4884 secret.go:188] Couldn't get secret openshift-operators/observability-ui-dashboards: secret "observability-ui-dashboards" not found Dec 10 00:53:17 crc kubenswrapper[4884]: E1210 00:53:17.421966 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7a2ff0cb-6085-4680-a330-aec1d9452896-serving-cert podName:7a2ff0cb-6085-4680-a330-aec1d9452896 nodeName:}" failed. No retries permitted until 2025-12-10 00:53:17.921951256 +0000 UTC m=+1370.999908373 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/7a2ff0cb-6085-4680-a330-aec1d9452896-serving-cert") pod "observability-ui-dashboards-7d5fb4cbfb-dlcnl" (UID: "7a2ff0cb-6085-4680-a330-aec1d9452896") : secret "observability-ui-dashboards" not found Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.452565 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txz6t\" (UniqueName: \"kubernetes.io/projected/7a2ff0cb-6085-4680-a330-aec1d9452896-kube-api-access-txz6t\") pod \"observability-ui-dashboards-7d5fb4cbfb-dlcnl\" (UID: \"7a2ff0cb-6085-4680-a330-aec1d9452896\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.503701 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-57c8f7c948-d6fv7"] Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.504938 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.525801 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-57c8f7c948-d6fv7"] Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.624781 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2fd73d8f-f33d-451b-9ac1-1c94ea504284-service-ca\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.624834 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2fd73d8f-f33d-451b-9ac1-1c94ea504284-oauth-serving-cert\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.624865 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6jpd\" (UniqueName: \"kubernetes.io/projected/2fd73d8f-f33d-451b-9ac1-1c94ea504284-kube-api-access-l6jpd\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.624898 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2fd73d8f-f33d-451b-9ac1-1c94ea504284-console-oauth-config\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.625145 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2fd73d8f-f33d-451b-9ac1-1c94ea504284-trusted-ca-bundle\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.625236 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/2fd73d8f-f33d-451b-9ac1-1c94ea504284-console-config\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.625368 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2fd73d8f-f33d-451b-9ac1-1c94ea504284-console-serving-cert\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.726635 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2fd73d8f-f33d-451b-9ac1-1c94ea504284-trusted-ca-bundle\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.726694 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/2fd73d8f-f33d-451b-9ac1-1c94ea504284-console-config\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.726753 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2fd73d8f-f33d-451b-9ac1-1c94ea504284-console-serving-cert\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.726785 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2fd73d8f-f33d-451b-9ac1-1c94ea504284-service-ca\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.726810 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2fd73d8f-f33d-451b-9ac1-1c94ea504284-oauth-serving-cert\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.726827 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6jpd\" (UniqueName: \"kubernetes.io/projected/2fd73d8f-f33d-451b-9ac1-1c94ea504284-kube-api-access-l6jpd\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.726852 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2fd73d8f-f33d-451b-9ac1-1c94ea504284-console-oauth-config\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.727620 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" 
(UniqueName: \"kubernetes.io/configmap/2fd73d8f-f33d-451b-9ac1-1c94ea504284-console-config\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.727659 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2fd73d8f-f33d-451b-9ac1-1c94ea504284-trusted-ca-bundle\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.728200 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/2fd73d8f-f33d-451b-9ac1-1c94ea504284-service-ca\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.728258 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/2fd73d8f-f33d-451b-9ac1-1c94ea504284-oauth-serving-cert\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.731172 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/2fd73d8f-f33d-451b-9ac1-1c94ea504284-console-serving-cert\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.741904 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/2fd73d8f-f33d-451b-9ac1-1c94ea504284-console-oauth-config\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.742466 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6jpd\" (UniqueName: \"kubernetes.io/projected/2fd73d8f-f33d-451b-9ac1-1c94ea504284-kube-api-access-l6jpd\") pod \"console-57c8f7c948-d6fv7\" (UID: \"2fd73d8f-f33d-451b-9ac1-1c94ea504284\") " pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.825832 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.930285 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a2ff0cb-6085-4680-a330-aec1d9452896-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-dlcnl\" (UID: \"7a2ff0cb-6085-4680-a330-aec1d9452896\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.930941 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.933914 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.936385 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.937001 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.937214 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.937263 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.937462 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-jjh9h" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.940089 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7a2ff0cb-6085-4680-a330-aec1d9452896-serving-cert\") pod \"observability-ui-dashboards-7d5fb4cbfb-dlcnl\" (UID: \"7a2ff0cb-6085-4680-a330-aec1d9452896\") " pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.943186 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Dec 10 00:53:17 crc kubenswrapper[4884]: I1210 00:53:17.954119 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.031899 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-config\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.031946 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.031984 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/23f4050b-2338-4211-9ff1-0997060904f8-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.032079 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/23f4050b-2338-4211-9ff1-0997060904f8-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.032161 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-out\" (UniqueName: \"kubernetes.io/empty-dir/23f4050b-2338-4211-9ff1-0997060904f8-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.032210 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.032331 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-npxg7\" (UniqueName: \"kubernetes.io/projected/23f4050b-2338-4211-9ff1-0997060904f8-kube-api-access-npxg7\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.032386 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.112153 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.134052 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/23f4050b-2338-4211-9ff1-0997060904f8-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.134123 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.134179 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-npxg7\" (UniqueName: \"kubernetes.io/projected/23f4050b-2338-4211-9ff1-0997060904f8-kube-api-access-npxg7\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.134209 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.135026 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"prometheus-metric-storage-0\" (UID: 
\"23f4050b-2338-4211-9ff1-0997060904f8\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.136093 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-config\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.136196 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.136333 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/23f4050b-2338-4211-9ff1-0997060904f8-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.136393 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/23f4050b-2338-4211-9ff1-0997060904f8-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.138475 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.139531 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/23f4050b-2338-4211-9ff1-0997060904f8-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.140906 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/23f4050b-2338-4211-9ff1-0997060904f8-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.149122 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-config\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.149522 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-web-config\") pod \"prometheus-metric-storage-0\" (UID: 
\"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.154028 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/23f4050b-2338-4211-9ff1-0997060904f8-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.162686 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-npxg7\" (UniqueName: \"kubernetes.io/projected/23f4050b-2338-4211-9ff1-0997060904f8-kube-api-access-npxg7\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.170480 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"prometheus-metric-storage-0\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:18 crc kubenswrapper[4884]: I1210 00:53:18.300511 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.105381 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.106966 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.115277 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.115636 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.115808 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-krph4" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.115965 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.116684 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.128321 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.181682 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ffe4f087-ebb2-404d-bdc9-fe508c624b82-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.181735 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffe4f087-ebb2-404d-bdc9-fe508c624b82-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 
00:53:20.181787 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe4f087-ebb2-404d-bdc9-fe508c624b82-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.181866 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98ccr\" (UniqueName: \"kubernetes.io/projected/ffe4f087-ebb2-404d-bdc9-fe508c624b82-kube-api-access-98ccr\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.181893 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ffe4f087-ebb2-404d-bdc9-fe508c624b82-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.181909 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffe4f087-ebb2-404d-bdc9-fe508c624b82-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.181933 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.181985 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffe4f087-ebb2-404d-bdc9-fe508c624b82-config\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.283251 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ffe4f087-ebb2-404d-bdc9-fe508c624b82-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.283321 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffe4f087-ebb2-404d-bdc9-fe508c624b82-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.283348 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.283388 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/ffe4f087-ebb2-404d-bdc9-fe508c624b82-config\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.283786 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.284014 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/ffe4f087-ebb2-404d-bdc9-fe508c624b82-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.284079 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ffe4f087-ebb2-404d-bdc9-fe508c624b82-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.284107 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffe4f087-ebb2-404d-bdc9-fe508c624b82-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.284145 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe4f087-ebb2-404d-bdc9-fe508c624b82-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.284190 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98ccr\" (UniqueName: \"kubernetes.io/projected/ffe4f087-ebb2-404d-bdc9-fe508c624b82-kube-api-access-98ccr\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.285195 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffe4f087-ebb2-404d-bdc9-fe508c624b82-config\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.285360 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ffe4f087-ebb2-404d-bdc9-fe508c624b82-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.293417 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffe4f087-ebb2-404d-bdc9-fe508c624b82-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.293424 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffe4f087-ebb2-404d-bdc9-fe508c624b82-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.294202 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ffe4f087-ebb2-404d-bdc9-fe508c624b82-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.303661 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98ccr\" (UniqueName: \"kubernetes.io/projected/ffe4f087-ebb2-404d-bdc9-fe508c624b82-kube-api-access-98ccr\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.323028 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"ovsdbserver-nb-0\" (UID: \"ffe4f087-ebb2-404d-bdc9-fe508c624b82\") " pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:20 crc kubenswrapper[4884]: I1210 00:53:20.449110 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.106040 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-577tv"] Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.107375 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.110125 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-d5g2l" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.110282 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.111356 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.132475 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-577tv"] Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.190959 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-m54sx"] Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.192979 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.199945 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-m54sx"] Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.204679 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b040b499-55d1-4173-bcfe-8e0100eed4b0-scripts\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.204744 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b040b499-55d1-4173-bcfe-8e0100eed4b0-var-run-ovn\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.204818 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b040b499-55d1-4173-bcfe-8e0100eed4b0-combined-ca-bundle\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.204857 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b040b499-55d1-4173-bcfe-8e0100eed4b0-var-log-ovn\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.204957 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htncv\" (UniqueName: \"kubernetes.io/projected/b040b499-55d1-4173-bcfe-8e0100eed4b0-kube-api-access-htncv\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.205025 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b040b499-55d1-4173-bcfe-8e0100eed4b0-var-run\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.205076 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b040b499-55d1-4173-bcfe-8e0100eed4b0-ovn-controller-tls-certs\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306176 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/fe96e3a4-d720-400e-9956-a5cda8c377d6-var-lib\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306219 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htncv\" (UniqueName: 
\"kubernetes.io/projected/b040b499-55d1-4173-bcfe-8e0100eed4b0-kube-api-access-htncv\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306248 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/fe96e3a4-d720-400e-9956-a5cda8c377d6-etc-ovs\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306287 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b040b499-55d1-4173-bcfe-8e0100eed4b0-var-run\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306306 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fe96e3a4-d720-400e-9956-a5cda8c377d6-var-run\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306360 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b040b499-55d1-4173-bcfe-8e0100eed4b0-ovn-controller-tls-certs\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306529 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fe96e3a4-d720-400e-9956-a5cda8c377d6-scripts\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306629 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b040b499-55d1-4173-bcfe-8e0100eed4b0-scripts\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306674 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b040b499-55d1-4173-bcfe-8e0100eed4b0-var-run-ovn\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306774 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b040b499-55d1-4173-bcfe-8e0100eed4b0-combined-ca-bundle\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306822 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b040b499-55d1-4173-bcfe-8e0100eed4b0-var-log-ovn\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " 
pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306906 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/fe96e3a4-d720-400e-9956-a5cda8c377d6-var-log\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.306953 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvp2m\" (UniqueName: \"kubernetes.io/projected/fe96e3a4-d720-400e-9956-a5cda8c377d6-kube-api-access-dvp2m\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.307058 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b040b499-55d1-4173-bcfe-8e0100eed4b0-var-run\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.307223 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b040b499-55d1-4173-bcfe-8e0100eed4b0-var-run-ovn\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.307247 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b040b499-55d1-4173-bcfe-8e0100eed4b0-var-log-ovn\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.309179 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b040b499-55d1-4173-bcfe-8e0100eed4b0-scripts\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.311390 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b040b499-55d1-4173-bcfe-8e0100eed4b0-ovn-controller-tls-certs\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.323737 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htncv\" (UniqueName: \"kubernetes.io/projected/b040b499-55d1-4173-bcfe-8e0100eed4b0-kube-api-access-htncv\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.324860 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b040b499-55d1-4173-bcfe-8e0100eed4b0-combined-ca-bundle\") pod \"ovn-controller-577tv\" (UID: \"b040b499-55d1-4173-bcfe-8e0100eed4b0\") " pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.408153 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/configmap/fe96e3a4-d720-400e-9956-a5cda8c377d6-scripts\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.408263 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/fe96e3a4-d720-400e-9956-a5cda8c377d6-var-log\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.408298 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvp2m\" (UniqueName: \"kubernetes.io/projected/fe96e3a4-d720-400e-9956-a5cda8c377d6-kube-api-access-dvp2m\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.408331 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/fe96e3a4-d720-400e-9956-a5cda8c377d6-var-lib\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.408363 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/fe96e3a4-d720-400e-9956-a5cda8c377d6-etc-ovs\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.408410 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fe96e3a4-d720-400e-9956-a5cda8c377d6-var-run\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.408520 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/fe96e3a4-d720-400e-9956-a5cda8c377d6-var-log\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.408542 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fe96e3a4-d720-400e-9956-a5cda8c377d6-var-run\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.408709 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/fe96e3a4-d720-400e-9956-a5cda8c377d6-etc-ovs\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.408708 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/fe96e3a4-d720-400e-9956-a5cda8c377d6-var-lib\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: 
I1210 00:53:21.413182 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fe96e3a4-d720-400e-9956-a5cda8c377d6-scripts\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.426911 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvp2m\" (UniqueName: \"kubernetes.io/projected/fe96e3a4-d720-400e-9956-a5cda8c377d6-kube-api-access-dvp2m\") pod \"ovn-controller-ovs-m54sx\" (UID: \"fe96e3a4-d720-400e-9956-a5cda8c377d6\") " pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.439715 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-577tv" Dec 10 00:53:21 crc kubenswrapper[4884]: I1210 00:53:21.511455 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.878428 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.880372 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.882617 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.883109 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-sx2g2" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.883731 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.900778 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.901405 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.950414 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.950553 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.950580 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.950619 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-9rzvv\" (UniqueName: \"kubernetes.io/projected/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-kube-api-access-9rzvv\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.950791 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.950903 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.951058 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-config\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:23 crc kubenswrapper[4884]: I1210 00:53:23.951114 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.057645 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-config\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.057896 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.057959 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.058031 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.058061 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " 
pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.058120 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rzvv\" (UniqueName: \"kubernetes.io/projected/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-kube-api-access-9rzvv\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.058213 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.058300 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.059860 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.060253 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-config\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.061224 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.063055 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.063479 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.074555 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.084404 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-metrics-certs-tls-certs\") pod 
\"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.084568 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rzvv\" (UniqueName: \"kubernetes.io/projected/7ba7b2e0-e414-4f73-b4b5-ffc5251a2709-kube-api-access-9rzvv\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.095975 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709\") " pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:24 crc kubenswrapper[4884]: I1210 00:53:24.214366 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:26 crc kubenswrapper[4884]: E1210 00:53:26.901629 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 10 00:53:26 crc kubenswrapper[4884]: E1210 00:53:26.902119 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-stsjj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-rbv6d_openstack(14a4ef5f-d8db-4d68-b124-0e38dca5a0f1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 00:53:26 crc 
kubenswrapper[4884]: E1210 00:53:26.903798 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d" podUID="14a4ef5f-d8db-4d68-b124-0e38dca5a0f1" Dec 10 00:53:26 crc kubenswrapper[4884]: E1210 00:53:26.932897 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 10 00:53:26 crc kubenswrapper[4884]: E1210 00:53:26.933411 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2gw6q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-2cdg2_openstack(eb951b03-9465-4548-a8e1-ceeb8d50494f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 00:53:26 crc kubenswrapper[4884]: E1210 00:53:26.934869 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" podUID="eb951b03-9465-4548-a8e1-ceeb8d50494f" Dec 10 00:53:27 crc kubenswrapper[4884]: I1210 00:53:27.730496 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 00:53:27 crc 
kubenswrapper[4884]: W1210 00:53:27.737903 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b8886f7_cc8b_4a23_bd53_c5e369e0cd93.slice/crio-ed0536c73d783f21b855085e886a29c5fb2f3ed8b8ce675791e7d77388d6cefe WatchSource:0}: Error finding container ed0536c73d783f21b855085e886a29c5fb2f3ed8b8ce675791e7d77388d6cefe: Status 404 returned error can't find the container with id ed0536c73d783f21b855085e886a29c5fb2f3ed8b8ce675791e7d77388d6cefe Dec 10 00:53:27 crc kubenswrapper[4884]: I1210 00:53:27.777894 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 00:53:27 crc kubenswrapper[4884]: W1210 00:53:27.831733 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7343006c_fda9_4e2d_8767_41ee4412c601.slice/crio-281e970d556f566817222c7905ca9af5b6793b162dc9db9fef8551df498d2d38 WatchSource:0}: Error finding container 281e970d556f566817222c7905ca9af5b6793b162dc9db9fef8551df498d2d38: Status 404 returned error can't find the container with id 281e970d556f566817222c7905ca9af5b6793b162dc9db9fef8551df498d2d38 Dec 10 00:53:27 crc kubenswrapper[4884]: I1210 00:53:27.847842 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7343006c-fda9-4e2d-8767-41ee4412c601","Type":"ContainerStarted","Data":"281e970d556f566817222c7905ca9af5b6793b162dc9db9fef8551df498d2d38"} Dec 10 00:53:27 crc kubenswrapper[4884]: I1210 00:53:27.850072 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93","Type":"ContainerStarted","Data":"ed0536c73d783f21b855085e886a29c5fb2f3ed8b8ce675791e7d77388d6cefe"} Dec 10 00:53:27 crc kubenswrapper[4884]: I1210 00:53:27.853083 4884 generic.go:334] "Generic (PLEG): container finished" podID="a2b91caf-ce98-4997-9f34-62031f4fb1f3" containerID="030ba65eba5b832b47303b44ea1a23a6407601b5b6809cecd1da6728cf95c7c0" exitCode=0 Dec 10 00:53:27 crc kubenswrapper[4884]: I1210 00:53:27.853150 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" event={"ID":"a2b91caf-ce98-4997-9f34-62031f4fb1f3","Type":"ContainerDied","Data":"030ba65eba5b832b47303b44ea1a23a6407601b5b6809cecd1da6728cf95c7c0"} Dec 10 00:53:27 crc kubenswrapper[4884]: I1210 00:53:27.856409 4884 generic.go:334] "Generic (PLEG): container finished" podID="9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" containerID="b31c037b068905a287f100966271f733b70348b99fa872c478df313b5afb4390" exitCode=0 Dec 10 00:53:27 crc kubenswrapper[4884]: I1210 00:53:27.856509 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" event={"ID":"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a","Type":"ContainerDied","Data":"b31c037b068905a287f100966271f733b70348b99fa872c478df313b5afb4390"} Dec 10 00:53:28 crc kubenswrapper[4884]: E1210 00:53:28.116965 4884 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Dec 10 00:53:28 crc kubenswrapper[4884]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Dec 10 00:53:28 crc kubenswrapper[4884]: > podSandboxID="bda4ea7effc483706cc04a14f1f74fc9895d679860ab8c4ad47356c03ae841d1" Dec 10 00:53:28 crc kubenswrapper[4884]: E1210 00:53:28.117187 
4884 kuberuntime_manager.go:1274] "Unhandled Error" err=< Dec 10 00:53:28 crc kubenswrapper[4884]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfdh5dfhb6h64h676hc4h78h97h669h54chfbh696hb5h54bh5d4h6bh64h644h677h584h5cbh698h9dh5bbh5f8h5b8hcdh644h5c7h694hbfh589q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qb8dr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5ccc8479f9-pptlw_openstack(9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Dec 10 00:53:28 crc kubenswrapper[4884]: > logger="UnhandledError" Dec 10 00:53:28 crc kubenswrapper[4884]: E1210 00:53:28.118249 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" podUID="9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" Dec 10 00:53:28 crc 
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.325845 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.339522 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.375588 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-57c8f7c948-d6fv7"]
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.388493 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-577tv"]
Dec 10 00:53:28 crc kubenswrapper[4884]: W1210 00:53:28.389942 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad50acf2_d8ef_4a8c_a594_dedfc823cf9f.slice/crio-a21332546eb6181f8efe619ac3fd0fdb741b32fb0d502930a316c78aa5b667ff WatchSource:0}: Error finding container a21332546eb6181f8efe619ac3fd0fdb741b32fb0d502930a316c78aa5b667ff: Status 404 returned error can't find the container with id a21332546eb6181f8efe619ac3fd0fdb741b32fb0d502930a316c78aa5b667ff
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.396714 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2"
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.397790 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 10 00:53:28 crc kubenswrapper[4884]: W1210 00:53:28.420525 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2fd73d8f_f33d_451b_9ac1_1c94ea504284.slice/crio-78e7edcb7f82de73d36f488b9eea05430eba2c1c6b6a59ae78738e1237a8d4c7 WatchSource:0}: Error finding container 78e7edcb7f82de73d36f488b9eea05430eba2c1c6b6a59ae78738e1237a8d4c7: Status 404 returned error can't find the container with id 78e7edcb7f82de73d36f488b9eea05430eba2c1c6b6a59ae78738e1237a8d4c7
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.428901 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.435510 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl"]
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.443451 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.452326 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb951b03-9465-4548-a8e1-ceeb8d50494f-config\") pod \"eb951b03-9465-4548-a8e1-ceeb8d50494f\" (UID: \"eb951b03-9465-4548-a8e1-ceeb8d50494f\") "
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.452406 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb951b03-9465-4548-a8e1-ceeb8d50494f-dns-svc\") pod \"eb951b03-9465-4548-a8e1-ceeb8d50494f\" (UID: \"eb951b03-9465-4548-a8e1-ceeb8d50494f\") "
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.452558 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2gw6q\" (UniqueName: \"kubernetes.io/projected/eb951b03-9465-4548-a8e1-ceeb8d50494f-kube-api-access-2gw6q\") pod \"eb951b03-9465-4548-a8e1-ceeb8d50494f\" (UID: \"eb951b03-9465-4548-a8e1-ceeb8d50494f\") "
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.452949 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb951b03-9465-4548-a8e1-ceeb8d50494f-config" (OuterVolumeSpecName: "config") pod "eb951b03-9465-4548-a8e1-ceeb8d50494f" (UID: "eb951b03-9465-4548-a8e1-ceeb8d50494f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.453474 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb951b03-9465-4548-a8e1-ceeb8d50494f-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.453489 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb951b03-9465-4548-a8e1-ceeb8d50494f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "eb951b03-9465-4548-a8e1-ceeb8d50494f" (UID: "eb951b03-9465-4548-a8e1-ceeb8d50494f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.453645 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d"
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.458924 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb951b03-9465-4548-a8e1-ceeb8d50494f-kube-api-access-2gw6q" (OuterVolumeSpecName: "kube-api-access-2gw6q") pod "eb951b03-9465-4548-a8e1-ceeb8d50494f" (UID: "eb951b03-9465-4548-a8e1-ceeb8d50494f"). InnerVolumeSpecName "kube-api-access-2gw6q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.559969 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1-config\") pod \"14a4ef5f-d8db-4d68-b124-0e38dca5a0f1\" (UID: \"14a4ef5f-d8db-4d68-b124-0e38dca5a0f1\") "
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.560473 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stsjj\" (UniqueName: \"kubernetes.io/projected/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1-kube-api-access-stsjj\") pod \"14a4ef5f-d8db-4d68-b124-0e38dca5a0f1\" (UID: \"14a4ef5f-d8db-4d68-b124-0e38dca5a0f1\") "
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.560934 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eb951b03-9465-4548-a8e1-ceeb8d50494f-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.561006 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2gw6q\" (UniqueName: \"kubernetes.io/projected/eb951b03-9465-4548-a8e1-ceeb8d50494f-kube-api-access-2gw6q\") on node \"crc\" DevicePath \"\""
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.561804 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1-config" (OuterVolumeSpecName: "config") pod "14a4ef5f-d8db-4d68-b124-0e38dca5a0f1" (UID: "14a4ef5f-d8db-4d68-b124-0e38dca5a0f1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.567626 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1-kube-api-access-stsjj" (OuterVolumeSpecName: "kube-api-access-stsjj") pod "14a4ef5f-d8db-4d68-b124-0e38dca5a0f1" (UID: "14a4ef5f-d8db-4d68-b124-0e38dca5a0f1"). InnerVolumeSpecName "kube-api-access-stsjj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.664769 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.664986 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stsjj\" (UniqueName: \"kubernetes.io/projected/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1-kube-api-access-stsjj\") on node \"crc\" DevicePath \"\""
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.743747 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-m54sx"]
Dec 10 00:53:28 crc kubenswrapper[4884]: W1210 00:53:28.757982 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe96e3a4_d720_400e_9956_a5cda8c377d6.slice/crio-7e4bee1045baadbecd2da4cadc6bf7fb51737588d66bbbf1317fdf1e6a5b4528 WatchSource:0}: Error finding container 7e4bee1045baadbecd2da4cadc6bf7fb51737588d66bbbf1317fdf1e6a5b4528: Status 404 returned error can't find the container with id 7e4bee1045baadbecd2da4cadc6bf7fb51737588d66bbbf1317fdf1e6a5b4528
Dec 10 00:53:28 crc kubenswrapper[4884]: W1210 00:53:28.793701 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ba7b2e0_e414_4f73_b4b5_ffc5251a2709.slice/crio-22f60aa9c2d6e0b2b058497bbabfa859aadf92607c8d766f7e47f8d5409409c4 WatchSource:0}: Error finding container 22f60aa9c2d6e0b2b058497bbabfa859aadf92607c8d766f7e47f8d5409409c4: Status 404 returned error can't find the container with id 22f60aa9c2d6e0b2b058497bbabfa859aadf92607c8d766f7e47f8d5409409c4
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.797040 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.869270 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-m54sx" event={"ID":"fe96e3a4-d720-400e-9956-a5cda8c377d6","Type":"ContainerStarted","Data":"7e4bee1045baadbecd2da4cadc6bf7fb51737588d66bbbf1317fdf1e6a5b4528"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.870888 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d089c5ef-75b6-480e-b726-abd349a291cc","Type":"ContainerStarted","Data":"4a40b3421e8a0e9e8b85cecc7853a36f81c32f77ab2270721b070742a7614951"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.872070 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d" event={"ID":"14a4ef5f-d8db-4d68-b124-0e38dca5a0f1","Type":"ContainerDied","Data":"6726ede1a4059ef62d3d59a8714cd1e04e40394b934a6bd74a6130231468c1ea"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.872155 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-rbv6d"
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.873798 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"30c1a0f0-5abf-4fac-89c9-afedea695fab","Type":"ContainerStarted","Data":"f363896a107312b025c7d7dd1cb15f74c5a0cd5656f4533d5f199169b6ed414b"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.874878 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709","Type":"ContainerStarted","Data":"22f60aa9c2d6e0b2b058497bbabfa859aadf92607c8d766f7e47f8d5409409c4"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.876641 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" event={"ID":"a2b91caf-ce98-4997-9f34-62031f4fb1f3","Type":"ContainerStarted","Data":"aad6c2d8163706af971580b4d4968667298cd895e74fb92dae8ce0070679cde3"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.877384 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c"
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.878496 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"23f4050b-2338-4211-9ff1-0997060904f8","Type":"ContainerStarted","Data":"c035caf280c43e8a4da86390fa801ad96d2aa4c2b4f85572d2010554f11db6a4"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.880196 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-57c8f7c948-d6fv7" event={"ID":"2fd73d8f-f33d-451b-9ac1-1c94ea504284","Type":"ContainerStarted","Data":"c52ec1200cce51e4bfca5a3310510caedf1ee5d53811392f0a08f437da95747d"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.880236 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-57c8f7c948-d6fv7" event={"ID":"2fd73d8f-f33d-451b-9ac1-1c94ea504284","Type":"ContainerStarted","Data":"78e7edcb7f82de73d36f488b9eea05430eba2c1c6b6a59ae78738e1237a8d4c7"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.881876 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2"
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.881872 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-2cdg2" event={"ID":"eb951b03-9465-4548-a8e1-ceeb8d50494f","Type":"ContainerDied","Data":"f673897b1e103d131d7ff20b76b4a9bbe48f40e99c3f9321d7996d6a793be993"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.883026 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl" event={"ID":"7a2ff0cb-6085-4680-a330-aec1d9452896","Type":"ContainerStarted","Data":"fada3dd3936e1fce05561b9f7c1a93a8ae66e88a4e54c29100fe3b1c41d95fb2"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.883960 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ad50acf2-d8ef-4a8c-a594-dedfc823cf9f","Type":"ContainerStarted","Data":"a21332546eb6181f8efe619ac3fd0fdb741b32fb0d502930a316c78aa5b667ff"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.884836 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"fb3db35c-fdef-4e0a-81a3-7c13f3a20649","Type":"ContainerStarted","Data":"d0ffac05a38d4e038982d550f53c05cfb0454fe9011c2ee5ca4f70a1e50298ce"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.886316 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-577tv" event={"ID":"b040b499-55d1-4173-bcfe-8e0100eed4b0","Type":"ContainerStarted","Data":"84f9616e6b646a9cbf7aca225eabd05c8ddac00a96557f41080188d505a6b48f"}
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.921221 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" podStartSLOduration=6.283031242 podStartE2EDuration="18.921202272s" podCreationTimestamp="2025-12-10 00:53:10 +0000 UTC" firstStartedPulling="2025-12-10 00:53:14.407115668 +0000 UTC m=+1367.485072775" lastFinishedPulling="2025-12-10 00:53:27.045286688 +0000 UTC m=+1380.123243805" observedRunningTime="2025-12-10 00:53:28.894692858 +0000 UTC m=+1381.972649975" watchObservedRunningTime="2025-12-10 00:53:28.921202272 +0000 UTC m=+1381.999159379"
Dec 10 00:53:28 crc kubenswrapper[4884]: I1210 00:53:28.946661 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-57c8f7c948-d6fv7" podStartSLOduration=11.946644018 podStartE2EDuration="11.946644018s" podCreationTimestamp="2025-12-10 00:53:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:53:28.942889487 +0000 UTC m=+1382.020846634" watchObservedRunningTime="2025-12-10 00:53:28.946644018 +0000 UTC m=+1382.024601135"
Dec 10 00:53:29 crc kubenswrapper[4884]: I1210 00:53:29.020554 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rbv6d"]
Dec 10 00:53:29 crc kubenswrapper[4884]: I1210 00:53:29.034851 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-rbv6d"]
Dec 10 00:53:29 crc kubenswrapper[4884]: I1210 00:53:29.049844 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-2cdg2"]
Dec 10 00:53:29 crc kubenswrapper[4884]: I1210 00:53:29.056966 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-2cdg2"]
Dec 10 00:53:29 crc kubenswrapper[4884]: I1210 00:53:29.300634 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14a4ef5f-d8db-4d68-b124-0e38dca5a0f1" path="/var/lib/kubelet/pods/14a4ef5f-d8db-4d68-b124-0e38dca5a0f1/volumes"
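
The pod_startup_latency_tracker entries just above tie together arithmetically: podStartSLOduration is podStartE2EDuration minus the image-pull window, i.e. lastFinishedPulling - firstStartedPulling taken from the monotonic m=+ offsets. For dnsmasq-dns-57d769cc4f-m2f9c that is 18.921202272 - (1380.123243805 - 1367.485072775) = 6.283031242 s, matching the logged value. The Go sketch below re-derives the figure from such an entry; the monoOffset helper and the trimmed entry string are illustrative, not kubelet code.

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// monoOffset extracts the monotonic m=+<seconds> suffix from one quoted
// timestamp field of a kubelet log entry, e.g.
// firstStartedPulling="2025-12-10 00:53:14.407115668 +0000 UTC m=+1367.485072775".
func monoOffset(entry, field string) (float64, bool) {
	re := regexp.MustCompile(field + `="[^"]* m=\+([0-9.]+)"`)
	m := re.FindStringSubmatch(entry)
	if m == nil {
		return 0, false
	}
	v, err := strconv.ParseFloat(m[1], 64)
	return v, err == nil
}

func main() {
	// Fields copied from the dnsmasq-dns-57d769cc4f-m2f9c entry above.
	entry := `podStartE2EDuration="18.921202272s" ` +
		`firstStartedPulling="2025-12-10 00:53:14.407115668 +0000 UTC m=+1367.485072775" ` +
		`lastFinishedPulling="2025-12-10 00:53:27.045286688 +0000 UTC m=+1380.123243805"`

	e2e := 18.921202272 // seconds, from podStartE2EDuration
	first, ok1 := monoOffset(entry, "firstStartedPulling")
	last, ok2 := monoOffset(entry, "lastFinishedPulling")
	if ok1 && ok2 {
		// The SLO figure excludes the time spent pulling images.
		fmt.Printf("derived podStartSLOduration = %.9f s\n", e2e-(last-first))
	}
}

Pods that never pulled report the zero time ("0001-01-01 00:00:00 +0000 UTC", which carries no m=+ offset), in which case the helper finds no match and the SLO figure simply equals the E2E figure, as in the console-57c8f7c948-d6fv7 entry above.
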
Dec 10 00:53:29 crc kubenswrapper[4884]: I1210 00:53:29.301157 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb951b03-9465-4548-a8e1-ceeb8d50494f" path="/var/lib/kubelet/pods/eb951b03-9465-4548-a8e1-ceeb8d50494f/volumes"
Dec 10 00:53:29 crc kubenswrapper[4884]: I1210 00:53:29.677267 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Dec 10 00:53:29 crc kubenswrapper[4884]: W1210 00:53:29.824595 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffe4f087_ebb2_404d_bdc9_fe508c624b82.slice/crio-5332a6f9226ab3ab84e2bcfa88f111c8256741bb229c2205838bb5ea931a8304 WatchSource:0}: Error finding container 5332a6f9226ab3ab84e2bcfa88f111c8256741bb229c2205838bb5ea931a8304: Status 404 returned error can't find the container with id 5332a6f9226ab3ab84e2bcfa88f111c8256741bb229c2205838bb5ea931a8304
Dec 10 00:53:29 crc kubenswrapper[4884]: I1210 00:53:29.912000 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" event={"ID":"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a","Type":"ContainerStarted","Data":"5d72baba2e4dcd47b84896f17dec1733412b3340a5989be073dbe0e72e42ac46"}
Dec 10 00:53:29 crc kubenswrapper[4884]: I1210 00:53:29.912278 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw"
Dec 10 00:53:29 crc kubenswrapper[4884]: I1210 00:53:29.913873 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"ffe4f087-ebb2-404d-bdc9-fe508c624b82","Type":"ContainerStarted","Data":"5332a6f9226ab3ab84e2bcfa88f111c8256741bb229c2205838bb5ea931a8304"}
Dec 10 00:53:29 crc kubenswrapper[4884]: I1210 00:53:29.934717 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" podStartSLOduration=4.779555096 podStartE2EDuration="20.934698315s" podCreationTimestamp="2025-12-10 00:53:09 +0000 UTC" firstStartedPulling="2025-12-10 00:53:10.894746223 +0000 UTC m=+1363.972703340" lastFinishedPulling="2025-12-10 00:53:27.049889442 +0000 UTC m=+1380.127846559" observedRunningTime="2025-12-10 00:53:29.927277566 +0000 UTC m=+1383.005234713" watchObservedRunningTime="2025-12-10 00:53:29.934698315 +0000 UTC m=+1383.012655432"
Dec 10 00:53:35 crc kubenswrapper[4884]: I1210 00:53:35.282626 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw"
Dec 10 00:53:35 crc kubenswrapper[4884]: I1210 00:53:35.751030 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c"
Dec 10 00:53:35 crc kubenswrapper[4884]: I1210 00:53:35.821714 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-pptlw"]
Dec 10 00:53:35 crc kubenswrapper[4884]: I1210 00:53:35.998570 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" podUID="9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" containerName="dnsmasq-dns" containerID="cri-o://5d72baba2e4dcd47b84896f17dec1733412b3340a5989be073dbe0e72e42ac46" gracePeriod=10
Dec 10 00:53:37 crc kubenswrapper[4884]: I1210 00:53:37.010018 4884 generic.go:334] "Generic (PLEG): container
finished" podID="9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" containerID="5d72baba2e4dcd47b84896f17dec1733412b3340a5989be073dbe0e72e42ac46" exitCode=0 Dec 10 00:53:37 crc kubenswrapper[4884]: I1210 00:53:37.010149 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" event={"ID":"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a","Type":"ContainerDied","Data":"5d72baba2e4dcd47b84896f17dec1733412b3340a5989be073dbe0e72e42ac46"} Dec 10 00:53:37 crc kubenswrapper[4884]: I1210 00:53:37.826854 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:37 crc kubenswrapper[4884]: I1210 00:53:37.826913 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:37 crc kubenswrapper[4884]: I1210 00:53:37.832304 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:38 crc kubenswrapper[4884]: I1210 00:53:38.044411 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-57c8f7c948-d6fv7" Dec 10 00:53:38 crc kubenswrapper[4884]: I1210 00:53:38.117720 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-fb8bc9c44-vbbc8"] Dec 10 00:53:40 crc kubenswrapper[4884]: I1210 00:53:40.282655 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" podUID="9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.117:5353: connect: connection refused" Dec 10 00:53:41 crc kubenswrapper[4884]: E1210 00:53:41.490208 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Dec 10 00:53:41 crc kubenswrapper[4884]: E1210 00:53:41.490985 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c6dbh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(30c1a0f0-5abf-4fac-89c9-afedea695fab): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 00:53:41 crc kubenswrapper[4884]: E1210 00:53:41.492379 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="30c1a0f0-5abf-4fac-89c9-afedea695fab" Dec 10 00:53:41 crc kubenswrapper[4884]: E1210 00:53:41.545935 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Dec 10 00:53:41 crc kubenswrapper[4884]: E1210 00:53:41.546152 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m6556,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(d089c5ef-75b6-480e-b726-abd349a291cc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 00:53:41 crc kubenswrapper[4884]: E1210 00:53:41.547482 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="d089c5ef-75b6-480e-b726-abd349a291cc" Dec 10 00:53:41 crc kubenswrapper[4884]: E1210 00:53:41.915594 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified" Dec 10 00:53:41 crc kubenswrapper[4884]: E1210 00:53:41.916188 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-controller,Image:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,Command:[ovn-controller --pidfile unix:/run/openvswitch/db.sock --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key 
--ca-cert=/etc/pki/tls/certs/ovndbca.crt],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5ffh65ch56ch67dh684h89h96h65ch5ffh59h55dh5c7h556h5f5hdfh88h5d6hfh579h69hcch67fh645h66bh57h9chdch559h674h5fh657h6cq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run-ovn,ReadOnly:false,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log-ovn,ReadOnly:false,MountPath:/var/log/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-htncv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_liveness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_readiness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/share/ovn/scripts/ovn-ctl stop_controller],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-577tv_openstack(b040b499-55d1-4173-bcfe-8e0100eed4b0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 00:53:41 crc kubenswrapper[4884]: E1210 00:53:41.917505 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"ovn-controller\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-577tv" podUID="b040b499-55d1-4173-bcfe-8e0100eed4b0" Dec 10 00:53:42 crc kubenswrapper[4884]: E1210 00:53:42.084119 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="d089c5ef-75b6-480e-b726-abd349a291cc" Dec 10 00:53:42 crc kubenswrapper[4884]: E1210 00:53:42.084377 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="30c1a0f0-5abf-4fac-89c9-afedea695fab" Dec 10 00:53:42 crc kubenswrapper[4884]: E1210 00:53:42.086698 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified\\\"\"" pod="openstack/ovn-controller-577tv" podUID="b040b499-55d1-4173-bcfe-8e0100eed4b0" Dec 10 00:53:42 crc kubenswrapper[4884]: I1210 00:53:42.416886 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" Dec 10 00:53:42 crc kubenswrapper[4884]: I1210 00:53:42.546704 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-dns-svc\") pod \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\" (UID: \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\") " Dec 10 00:53:42 crc kubenswrapper[4884]: I1210 00:53:42.546767 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-config\") pod \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\" (UID: \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\") " Dec 10 00:53:42 crc kubenswrapper[4884]: I1210 00:53:42.546796 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qb8dr\" (UniqueName: \"kubernetes.io/projected/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-kube-api-access-qb8dr\") pod \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\" (UID: \"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a\") " Dec 10 00:53:42 crc kubenswrapper[4884]: I1210 00:53:42.552132 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-kube-api-access-qb8dr" (OuterVolumeSpecName: "kube-api-access-qb8dr") pod "9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" (UID: "9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a"). InnerVolumeSpecName "kube-api-access-qb8dr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:53:42 crc kubenswrapper[4884]: I1210 00:53:42.619233 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-config" (OuterVolumeSpecName: "config") pod "9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" (UID: "9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:53:42 crc kubenswrapper[4884]: I1210 00:53:42.649837 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:53:42 crc kubenswrapper[4884]: I1210 00:53:42.649907 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qb8dr\" (UniqueName: \"kubernetes.io/projected/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-kube-api-access-qb8dr\") on node \"crc\" DevicePath \"\"" Dec 10 00:53:42 crc kubenswrapper[4884]: I1210 00:53:42.657039 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" (UID: "9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:53:42 crc kubenswrapper[4884]: I1210 00:53:42.751313 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:53:43 crc kubenswrapper[4884]: E1210 00:53:43.015757 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Dec 10 00:53:43 crc kubenswrapper[4884]: E1210 00:53:43.016112 4884 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Dec 10 00:53:43 crc kubenswrapper[4884]: E1210 00:53:43.016253 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bdghx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(ad50acf2-d8ef-4a8c-a594-dedfc823cf9f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 10 00:53:43 crc kubenswrapper[4884]: E1210 00:53:43.017322 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="ad50acf2-d8ef-4a8c-a594-dedfc823cf9f"
Dec 10 00:53:43 crc kubenswrapper[4884]: I1210 00:53:43.096782 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw"
Dec 10 00:53:43 crc kubenswrapper[4884]: I1210 00:53:43.097011 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-pptlw" event={"ID":"9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a","Type":"ContainerDied","Data":"bda4ea7effc483706cc04a14f1f74fc9895d679860ab8c4ad47356c03ae841d1"}
Dec 10 00:53:43 crc kubenswrapper[4884]: I1210 00:53:43.097141 4884 scope.go:117] "RemoveContainer" containerID="5d72baba2e4dcd47b84896f17dec1733412b3340a5989be073dbe0e72e42ac46"
Dec 10 00:53:43 crc kubenswrapper[4884]: E1210 00:53:43.153161 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="ad50acf2-d8ef-4a8c-a594-dedfc823cf9f"
Dec 10 00:53:43 crc kubenswrapper[4884]: I1210 00:53:43.211061 4884 scope.go:117] "RemoveContainer" containerID="b31c037b068905a287f100966271f733b70348b99fa872c478df313b5afb4390"
Dec 10 00:53:43 crc kubenswrapper[4884]: I1210 00:53:43.212720 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-pptlw"]
Dec 10 00:53:43 crc kubenswrapper[4884]: I1210 00:53:43.225254 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-pptlw"]
Dec 10 00:53:43 crc kubenswrapper[4884]: I1210 00:53:43.303560 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" path="/var/lib/kubelet/pods/9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a/volumes"
Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.109331 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl" event={"ID":"7a2ff0cb-6085-4680-a330-aec1d9452896","Type":"ContainerStarted","Data":"640f626a39967980cedac93523b1e3524ac28ffe104db90313fccd90fd4d76ec"}
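
Each failed pull above surfaces twice: once as ErrImagePull when the CRI pull itself is canceled mid-copy (00:53:43.017322, kube-state-metrics), and again as ImagePullBackOff on the next sync (00:53:43.153161) once the image is inside its back-off window. Kubelet spaces pull retries with a per-image doubling backoff; the 10s initial delay and 300s cap in this sketch are commonly cited kubelet defaults, assumed here rather than read from this log.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed defaults: 10s initial delay, doubling per failure, 300s cap.
	// Verify against the kubelet version in use.
	delay, limit := 10*time.Second, 300*time.Second
	for attempt := 1; delay < limit; attempt++ {
		fmt.Printf("after failure %d: wait %v before retrying the pull\n", attempt, delay)
		delay *= 2
	}
	fmt.Printf("after further failures: wait %v (cap)\n", limit)
}

While an image stays in back-off, each sync logs the pod_workers.go:1301 "Error syncing pod, skipping" line without a new pull being attempted, which is why the back-off messages repeat far more often than actual registry traffic.
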
Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.114549 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"fb3db35c-fdef-4e0a-81a3-7c13f3a20649","Type":"ContainerStarted","Data":"69e1ac76fa7dc22500d9a1662e2fc94fb6298dfb90fa494fbb2c75720aa0c602"}
Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.114647 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.134999 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-ui-dashboards-7d5fb4cbfb-dlcnl" podStartSLOduration=13.213200901 podStartE2EDuration="27.134980423s" podCreationTimestamp="2025-12-10 00:53:17 +0000 UTC" firstStartedPulling="2025-12-10 00:53:28.415938526 +0000 UTC m=+1381.493895643" lastFinishedPulling="2025-12-10 00:53:42.337718048 +0000 UTC m=+1395.415675165" observedRunningTime="2025-12-10 00:53:44.132257519 +0000 UTC m=+1397.210214686" watchObservedRunningTime="2025-12-10 00:53:44.134980423 +0000 UTC m=+1397.212937550"
Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.184950 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=16.541190441 podStartE2EDuration="30.184933219s" podCreationTimestamp="2025-12-10 00:53:14 +0000 UTC" firstStartedPulling="2025-12-10 00:53:28.425962817 +0000 UTC m=+1381.503919934" lastFinishedPulling="2025-12-10 00:53:42.069705595 +0000 UTC m=+1395.147662712" observedRunningTime="2025-12-10 00:53:44.157620943 +0000 UTC m=+1397.235578100" watchObservedRunningTime="2025-12-10 00:53:44.184933219 +0000 UTC m=+1397.262890326"
Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.504456 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-wsrpp"]
Dec 10 00:53:44 crc kubenswrapper[4884]: E1210 00:53:44.505263 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" containerName="init"
Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.505290 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" containerName="init"
Dec 10 00:53:44 crc kubenswrapper[4884]: E1210 00:53:44.505304 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" containerName="dnsmasq-dns"
Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.505316 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" containerName="dnsmasq-dns"
Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.505550 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bc12a3c-e2b7-45af-a19d-97fb5a4f7b2a" containerName="dnsmasq-dns"
Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.507100 4884 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.510387 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.513809 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wsrpp"] Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.590853 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/655ff7a7-35ba-4042-b170-f9de53553510-combined-ca-bundle\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.590920 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/655ff7a7-35ba-4042-b170-f9de53553510-ovn-rundir\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.591027 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/655ff7a7-35ba-4042-b170-f9de53553510-ovs-rundir\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.591175 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/655ff7a7-35ba-4042-b170-f9de53553510-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.591394 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/655ff7a7-35ba-4042-b170-f9de53553510-config\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.591679 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7zfq\" (UniqueName: \"kubernetes.io/projected/655ff7a7-35ba-4042-b170-f9de53553510-kube-api-access-n7zfq\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.693404 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/655ff7a7-35ba-4042-b170-f9de53553510-config\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.693506 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n7zfq\" (UniqueName: \"kubernetes.io/projected/655ff7a7-35ba-4042-b170-f9de53553510-kube-api-access-n7zfq\") pod \"ovn-controller-metrics-wsrpp\" (UID: 
\"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.693547 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/655ff7a7-35ba-4042-b170-f9de53553510-combined-ca-bundle\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.693580 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/655ff7a7-35ba-4042-b170-f9de53553510-ovn-rundir\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.693649 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/655ff7a7-35ba-4042-b170-f9de53553510-ovs-rundir\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.693692 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/655ff7a7-35ba-4042-b170-f9de53553510-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.694187 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/655ff7a7-35ba-4042-b170-f9de53553510-ovs-rundir\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.694249 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/655ff7a7-35ba-4042-b170-f9de53553510-config\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.694674 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/655ff7a7-35ba-4042-b170-f9de53553510-ovn-rundir\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.776228 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/655ff7a7-35ba-4042-b170-f9de53553510-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.776716 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/655ff7a7-35ba-4042-b170-f9de53553510-combined-ca-bundle\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 
00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.777128 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7zfq\" (UniqueName: \"kubernetes.io/projected/655ff7a7-35ba-4042-b170-f9de53553510-kube-api-access-n7zfq\") pod \"ovn-controller-metrics-wsrpp\" (UID: \"655ff7a7-35ba-4042-b170-f9de53553510\") " pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.830485 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-dqc2s"] Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.830904 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-wsrpp" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.831945 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.843773 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-dqc2s"] Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.846511 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.998692 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bjnx\" (UniqueName: \"kubernetes.io/projected/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-kube-api-access-2bjnx\") pod \"dnsmasq-dns-6bc7876d45-dqc2s\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.999063 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-dqc2s\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.999269 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-config\") pod \"dnsmasq-dns-6bc7876d45-dqc2s\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:44 crc kubenswrapper[4884]: I1210 00:53:44.999362 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-dqc2s\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.032950 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-dqc2s"] Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.062042 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-plqg7"] Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.063358 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.067961 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.084962 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-plqg7"] Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.106501 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bjnx\" (UniqueName: \"kubernetes.io/projected/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-kube-api-access-2bjnx\") pod \"dnsmasq-dns-6bc7876d45-dqc2s\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.106802 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-dqc2s\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.107174 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-config\") pod \"dnsmasq-dns-6bc7876d45-dqc2s\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.107340 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-dqc2s\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.107700 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-dqc2s\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.108088 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-config\") pod \"dnsmasq-dns-6bc7876d45-dqc2s\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.108295 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-dqc2s\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.128657 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7343006c-fda9-4e2d-8767-41ee4412c601","Type":"ContainerStarted","Data":"8e10a63d7c4265ae6e3042c0e1fd366e4fec44c8460e8ef66d0de06d49221460"} Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.135471 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709","Type":"ContainerStarted","Data":"9f07be6d32acb3ebe999777cdfc3c6d6b6885f5cb8644071a501c5f519aef95c"} Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.144760 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-m54sx" event={"ID":"fe96e3a4-d720-400e-9956-a5cda8c377d6","Type":"ContainerStarted","Data":"cd968620ab18f974d0dfef12a86ad4cda493cda42ff415a7dc1c4e38c8848c37"} Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.173719 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bjnx\" (UniqueName: \"kubernetes.io/projected/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-kube-api-access-2bjnx\") pod \"dnsmasq-dns-6bc7876d45-dqc2s\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.211870 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.211918 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-dns-svc\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.212034 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-config\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.212053 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.212089 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5m59\" (UniqueName: \"kubernetes.io/projected/0f550bfc-1f18-4b79-85e0-420dc9f852c1-kube-api-access-z5m59\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.316993 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.317263 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.317621 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-dns-svc\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.317849 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-config\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.317874 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.317912 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5m59\" (UniqueName: \"kubernetes.io/projected/0f550bfc-1f18-4b79-85e0-420dc9f852c1-kube-api-access-z5m59\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.318153 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-dns-svc\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.319424 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.319531 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.319672 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-config\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.339089 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5m59\" (UniqueName: \"kubernetes.io/projected/0f550bfc-1f18-4b79-85e0-420dc9f852c1-kube-api-access-z5m59\") pod \"dnsmasq-dns-8554648995-plqg7\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 
00:53:45.634712 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.649272 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-wsrpp"] Dec 10 00:53:45 crc kubenswrapper[4884]: W1210 00:53:45.660717 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod655ff7a7_35ba_4042_b170_f9de53553510.slice/crio-1d587fce8d4cc9f98a99c7ea9b0cbb5bfecbbee40a6a83d6fb110b116500aef1 WatchSource:0}: Error finding container 1d587fce8d4cc9f98a99c7ea9b0cbb5bfecbbee40a6a83d6fb110b116500aef1: Status 404 returned error can't find the container with id 1d587fce8d4cc9f98a99c7ea9b0cbb5bfecbbee40a6a83d6fb110b116500aef1 Dec 10 00:53:45 crc kubenswrapper[4884]: I1210 00:53:45.799278 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-dqc2s"] Dec 10 00:53:45 crc kubenswrapper[4884]: W1210 00:53:45.806340 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0e5b44bb_81ff_4112_aef7_1aa20e6a23a5.slice/crio-1d0a5ec04b12b08df5e7f70bb81404ae250c8c0f671e6c99ad5285907ba3b3b9 WatchSource:0}: Error finding container 1d0a5ec04b12b08df5e7f70bb81404ae250c8c0f671e6c99ad5285907ba3b3b9: Status 404 returned error can't find the container with id 1d0a5ec04b12b08df5e7f70bb81404ae250c8c0f671e6c99ad5285907ba3b3b9 Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.164610 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93","Type":"ContainerStarted","Data":"4e3d15fd0e5ae08056b22ae7da7e8668a86ba45b7eaea66648647fc03028403b"} Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.166736 4884 generic.go:334] "Generic (PLEG): container finished" podID="fe96e3a4-d720-400e-9956-a5cda8c377d6" containerID="cd968620ab18f974d0dfef12a86ad4cda493cda42ff415a7dc1c4e38c8848c37" exitCode=0 Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.166963 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-m54sx" event={"ID":"fe96e3a4-d720-400e-9956-a5cda8c377d6","Type":"ContainerDied","Data":"cd968620ab18f974d0dfef12a86ad4cda493cda42ff415a7dc1c4e38c8848c37"} Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.173396 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" event={"ID":"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5","Type":"ContainerStarted","Data":"cee94e1eeff0d8390467e1ccc6d51ca9563d143a30f45b2905fffc56f30c1b20"} Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.173487 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" event={"ID":"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5","Type":"ContainerStarted","Data":"1d0a5ec04b12b08df5e7f70bb81404ae250c8c0f671e6c99ad5285907ba3b3b9"} Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.182272 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wsrpp" event={"ID":"655ff7a7-35ba-4042-b170-f9de53553510","Type":"ContainerStarted","Data":"1d587fce8d4cc9f98a99c7ea9b0cbb5bfecbbee40a6a83d6fb110b116500aef1"} Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.184974 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"ffe4f087-ebb2-404d-bdc9-fe508c624b82","Type":"ContainerStarted","Data":"761171922ab53539f9dd2c3bb816befdfdf40e4156d1a261838ac8e319710535"} Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.189399 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-plqg7"] Dec 10 00:53:46 crc kubenswrapper[4884]: W1210 00:53:46.373492 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f550bfc_1f18_4b79_85e0_420dc9f852c1.slice/crio-81dfa229e1757be7be947c8328b741b5037d14a57d228587df642b35d34f1eec WatchSource:0}: Error finding container 81dfa229e1757be7be947c8328b741b5037d14a57d228587df642b35d34f1eec: Status 404 returned error can't find the container with id 81dfa229e1757be7be947c8328b741b5037d14a57d228587df642b35d34f1eec Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.700295 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.852304 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2bjnx\" (UniqueName: \"kubernetes.io/projected/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-kube-api-access-2bjnx\") pod \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.852349 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-config\") pod \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.852928 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-ovsdbserver-sb\") pod \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.852998 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-dns-svc\") pod \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\" (UID: \"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5\") " Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.857616 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-kube-api-access-2bjnx" (OuterVolumeSpecName: "kube-api-access-2bjnx") pod "0e5b44bb-81ff-4112-aef7-1aa20e6a23a5" (UID: "0e5b44bb-81ff-4112-aef7-1aa20e6a23a5"). InnerVolumeSpecName "kube-api-access-2bjnx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.877950 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0e5b44bb-81ff-4112-aef7-1aa20e6a23a5" (UID: "0e5b44bb-81ff-4112-aef7-1aa20e6a23a5"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.878733 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-config" (OuterVolumeSpecName: "config") pod "0e5b44bb-81ff-4112-aef7-1aa20e6a23a5" (UID: "0e5b44bb-81ff-4112-aef7-1aa20e6a23a5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.879841 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0e5b44bb-81ff-4112-aef7-1aa20e6a23a5" (UID: "0e5b44bb-81ff-4112-aef7-1aa20e6a23a5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.955491 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.955516 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.955526 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2bjnx\" (UniqueName: \"kubernetes.io/projected/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-kube-api-access-2bjnx\") on node \"crc\" DevicePath \"\"" Dec 10 00:53:46 crc kubenswrapper[4884]: I1210 00:53:46.955534 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.194337 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"23f4050b-2338-4211-9ff1-0997060904f8","Type":"ContainerStarted","Data":"bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7"} Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.197817 4884 generic.go:334] "Generic (PLEG): container finished" podID="0e5b44bb-81ff-4112-aef7-1aa20e6a23a5" containerID="cee94e1eeff0d8390467e1ccc6d51ca9563d143a30f45b2905fffc56f30c1b20" exitCode=0 Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.197905 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.198221 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" event={"ID":"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5","Type":"ContainerDied","Data":"cee94e1eeff0d8390467e1ccc6d51ca9563d143a30f45b2905fffc56f30c1b20"} Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.198254 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-dqc2s" event={"ID":"0e5b44bb-81ff-4112-aef7-1aa20e6a23a5","Type":"ContainerDied","Data":"1d0a5ec04b12b08df5e7f70bb81404ae250c8c0f671e6c99ad5285907ba3b3b9"} Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.198292 4884 scope.go:117] "RemoveContainer" containerID="cee94e1eeff0d8390467e1ccc6d51ca9563d143a30f45b2905fffc56f30c1b20" Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.212837 4884 generic.go:334] "Generic (PLEG): container finished" podID="0f550bfc-1f18-4b79-85e0-420dc9f852c1" containerID="446e6fba334794991b09a382a3f205b9831a403528c007953ec237b50f2832ea" exitCode=0 Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.212917 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-plqg7" event={"ID":"0f550bfc-1f18-4b79-85e0-420dc9f852c1","Type":"ContainerDied","Data":"446e6fba334794991b09a382a3f205b9831a403528c007953ec237b50f2832ea"} Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.212952 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-plqg7" event={"ID":"0f550bfc-1f18-4b79-85e0-420dc9f852c1","Type":"ContainerStarted","Data":"81dfa229e1757be7be947c8328b741b5037d14a57d228587df642b35d34f1eec"} Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.221171 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-m54sx" event={"ID":"fe96e3a4-d720-400e-9956-a5cda8c377d6","Type":"ContainerStarted","Data":"fe922f25c7091a2729074d4faaa9b3b381eb186aa21fb00ca8fba563addabb63"} Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.221220 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.221233 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-m54sx" event={"ID":"fe96e3a4-d720-400e-9956-a5cda8c377d6","Type":"ContainerStarted","Data":"11c7b61036bd3dfe1c089dcc1cc78f94bc799286805630a155b4265ba7f55245"} Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.221271 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.309217 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-m54sx" podStartSLOduration=13.000498016 podStartE2EDuration="26.309201166s" podCreationTimestamp="2025-12-10 00:53:21 +0000 UTC" firstStartedPulling="2025-12-10 00:53:28.760908623 +0000 UTC m=+1381.838865740" lastFinishedPulling="2025-12-10 00:53:42.069611783 +0000 UTC m=+1395.147568890" observedRunningTime="2025-12-10 00:53:47.304591642 +0000 UTC m=+1400.382548759" watchObservedRunningTime="2025-12-10 00:53:47.309201166 +0000 UTC m=+1400.387158283" Dec 10 00:53:47 crc kubenswrapper[4884]: I1210 00:53:47.349363 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-dqc2s"] Dec 10 00:53:47 crc kubenswrapper[4884]: 
I1210 00:53:47.356930 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-dqc2s"] Dec 10 00:53:49 crc kubenswrapper[4884]: I1210 00:53:49.301592 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e5b44bb-81ff-4112-aef7-1aa20e6a23a5" path="/var/lib/kubelet/pods/0e5b44bb-81ff-4112-aef7-1aa20e6a23a5/volumes" Dec 10 00:53:49 crc kubenswrapper[4884]: I1210 00:53:49.632654 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Dec 10 00:53:50 crc kubenswrapper[4884]: I1210 00:53:50.607369 4884 scope.go:117] "RemoveContainer" containerID="cee94e1eeff0d8390467e1ccc6d51ca9563d143a30f45b2905fffc56f30c1b20" Dec 10 00:53:50 crc kubenswrapper[4884]: E1210 00:53:50.608216 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cee94e1eeff0d8390467e1ccc6d51ca9563d143a30f45b2905fffc56f30c1b20\": container with ID starting with cee94e1eeff0d8390467e1ccc6d51ca9563d143a30f45b2905fffc56f30c1b20 not found: ID does not exist" containerID="cee94e1eeff0d8390467e1ccc6d51ca9563d143a30f45b2905fffc56f30c1b20" Dec 10 00:53:50 crc kubenswrapper[4884]: I1210 00:53:50.608332 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cee94e1eeff0d8390467e1ccc6d51ca9563d143a30f45b2905fffc56f30c1b20"} err="failed to get container status \"cee94e1eeff0d8390467e1ccc6d51ca9563d143a30f45b2905fffc56f30c1b20\": rpc error: code = NotFound desc = could not find container \"cee94e1eeff0d8390467e1ccc6d51ca9563d143a30f45b2905fffc56f30c1b20\": container with ID starting with cee94e1eeff0d8390467e1ccc6d51ca9563d143a30f45b2905fffc56f30c1b20 not found: ID does not exist" Dec 10 00:53:51 crc kubenswrapper[4884]: I1210 00:53:51.258463 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"ffe4f087-ebb2-404d-bdc9-fe508c624b82","Type":"ContainerStarted","Data":"889e975638225ab6db0cb17e1207c204d93777ae8cce2307adeda383bad2c4f2"} Dec 10 00:53:51 crc kubenswrapper[4884]: I1210 00:53:51.260088 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"7ba7b2e0-e414-4f73-b4b5-ffc5251a2709","Type":"ContainerStarted","Data":"faa1df3009aeca732a01c5cb50190e072515fe5a96cc234cff1e944a3d6a0bd7"} Dec 10 00:53:51 crc kubenswrapper[4884]: I1210 00:53:51.264495 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-plqg7" event={"ID":"0f550bfc-1f18-4b79-85e0-420dc9f852c1","Type":"ContainerStarted","Data":"41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2"} Dec 10 00:53:51 crc kubenswrapper[4884]: I1210 00:53:51.264696 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:51 crc kubenswrapper[4884]: I1210 00:53:51.266052 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-wsrpp" event={"ID":"655ff7a7-35ba-4042-b170-f9de53553510","Type":"ContainerStarted","Data":"00cc13c8bf4bd21e48d1836949efba27bb56f4bcf6d3aa053dc09e37c7b39316"} Dec 10 00:53:51 crc kubenswrapper[4884]: I1210 00:53:51.305308 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=11.404298821 podStartE2EDuration="32.305276528s" podCreationTimestamp="2025-12-10 00:53:19 +0000 UTC" firstStartedPulling="2025-12-10 00:53:29.826767317 +0000 UTC 
m=+1382.904724434" lastFinishedPulling="2025-12-10 00:53:50.727745024 +0000 UTC m=+1403.805702141" observedRunningTime="2025-12-10 00:53:51.281685571 +0000 UTC m=+1404.359642698" watchObservedRunningTime="2025-12-10 00:53:51.305276528 +0000 UTC m=+1404.383233645" Dec 10 00:53:51 crc kubenswrapper[4884]: I1210 00:53:51.348818 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-wsrpp" podStartSLOduration=2.300716828 podStartE2EDuration="7.3487825s" podCreationTimestamp="2025-12-10 00:53:44 +0000 UTC" firstStartedPulling="2025-12-10 00:53:45.66256801 +0000 UTC m=+1398.740525127" lastFinishedPulling="2025-12-10 00:53:50.710633682 +0000 UTC m=+1403.788590799" observedRunningTime="2025-12-10 00:53:51.321964097 +0000 UTC m=+1404.399921234" watchObservedRunningTime="2025-12-10 00:53:51.3487825 +0000 UTC m=+1404.426739617" Dec 10 00:53:51 crc kubenswrapper[4884]: I1210 00:53:51.359062 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=7.443410906 podStartE2EDuration="29.359040536s" podCreationTimestamp="2025-12-10 00:53:22 +0000 UTC" firstStartedPulling="2025-12-10 00:53:28.794981861 +0000 UTC m=+1381.872938978" lastFinishedPulling="2025-12-10 00:53:50.710611491 +0000 UTC m=+1403.788568608" observedRunningTime="2025-12-10 00:53:51.348629116 +0000 UTC m=+1404.426586273" watchObservedRunningTime="2025-12-10 00:53:51.359040536 +0000 UTC m=+1404.436997673" Dec 10 00:53:51 crc kubenswrapper[4884]: I1210 00:53:51.384716 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-plqg7" podStartSLOduration=6.384685287 podStartE2EDuration="6.384685287s" podCreationTimestamp="2025-12-10 00:53:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:53:51.380049122 +0000 UTC m=+1404.458006259" watchObservedRunningTime="2025-12-10 00:53:51.384685287 +0000 UTC m=+1404.462642394" Dec 10 00:53:53 crc kubenswrapper[4884]: I1210 00:53:53.288543 4884 generic.go:334] "Generic (PLEG): container finished" podID="23f4050b-2338-4211-9ff1-0997060904f8" containerID="bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7" exitCode=0 Dec 10 00:53:53 crc kubenswrapper[4884]: I1210 00:53:53.296740 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"23f4050b-2338-4211-9ff1-0997060904f8","Type":"ContainerDied","Data":"bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7"} Dec 10 00:53:53 crc kubenswrapper[4884]: I1210 00:53:53.449966 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:53 crc kubenswrapper[4884]: I1210 00:53:53.514612 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.216152 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.216722 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.286200 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.314621 4884 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.371194 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.377760 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.804771 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Dec 10 00:53:54 crc kubenswrapper[4884]: E1210 00:53:54.805352 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e5b44bb-81ff-4112-aef7-1aa20e6a23a5" containerName="init" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.805369 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e5b44bb-81ff-4112-aef7-1aa20e6a23a5" containerName="init" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.806386 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e5b44bb-81ff-4112-aef7-1aa20e6a23a5" containerName="init" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.809373 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.850383 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-q46qg" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.850629 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.850638 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.850763 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.860303 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.914289 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/785aab89-2566-4c6f-a2b6-58207021cf39-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.914384 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/785aab89-2566-4c6f-a2b6-58207021cf39-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.914406 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/785aab89-2566-4c6f-a2b6-58207021cf39-config\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.915243 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/785aab89-2566-4c6f-a2b6-58207021cf39-combined-ca-bundle\") pod \"ovn-northd-0\" 
(UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.915278 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/785aab89-2566-4c6f-a2b6-58207021cf39-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.915318 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjpq2\" (UniqueName: \"kubernetes.io/projected/785aab89-2566-4c6f-a2b6-58207021cf39-kube-api-access-mjpq2\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:54 crc kubenswrapper[4884]: I1210 00:53:54.915340 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/785aab89-2566-4c6f-a2b6-58207021cf39-scripts\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.017489 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/785aab89-2566-4c6f-a2b6-58207021cf39-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.017567 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/785aab89-2566-4c6f-a2b6-58207021cf39-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.017587 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/785aab89-2566-4c6f-a2b6-58207021cf39-config\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.017612 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/785aab89-2566-4c6f-a2b6-58207021cf39-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.017635 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/785aab89-2566-4c6f-a2b6-58207021cf39-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.017670 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjpq2\" (UniqueName: \"kubernetes.io/projected/785aab89-2566-4c6f-a2b6-58207021cf39-kube-api-access-mjpq2\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.017689 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/785aab89-2566-4c6f-a2b6-58207021cf39-scripts\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.018456 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/785aab89-2566-4c6f-a2b6-58207021cf39-scripts\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.018967 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/785aab89-2566-4c6f-a2b6-58207021cf39-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.019192 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/785aab89-2566-4c6f-a2b6-58207021cf39-config\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.024523 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/785aab89-2566-4c6f-a2b6-58207021cf39-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.024855 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/785aab89-2566-4c6f-a2b6-58207021cf39-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.025237 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/785aab89-2566-4c6f-a2b6-58207021cf39-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.037030 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjpq2\" (UniqueName: \"kubernetes.io/projected/785aab89-2566-4c6f-a2b6-58207021cf39-kube-api-access-mjpq2\") pod \"ovn-northd-0\" (UID: \"785aab89-2566-4c6f-a2b6-58207021cf39\") " pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.190100 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.330272 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ad50acf2-d8ef-4a8c-a594-dedfc823cf9f","Type":"ContainerStarted","Data":"c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114"} Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.330875 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.357673 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=13.063919509 podStartE2EDuration="39.357648406s" podCreationTimestamp="2025-12-10 00:53:16 +0000 UTC" firstStartedPulling="2025-12-10 00:53:28.39161076 +0000 UTC m=+1381.469567877" lastFinishedPulling="2025-12-10 00:53:54.685339657 +0000 UTC m=+1407.763296774" observedRunningTime="2025-12-10 00:53:55.349683721 +0000 UTC m=+1408.427640858" watchObservedRunningTime="2025-12-10 00:53:55.357648406 +0000 UTC m=+1408.435605523" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.636225 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.706036 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-m2f9c"] Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.706271 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" podUID="a2b91caf-ce98-4997-9f34-62031f4fb1f3" containerName="dnsmasq-dns" containerID="cri-o://aad6c2d8163706af971580b4d4968667298cd895e74fb92dae8ce0070679cde3" gracePeriod=10 Dec 10 00:53:55 crc kubenswrapper[4884]: I1210 00:53:55.747570 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.342993 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"785aab89-2566-4c6f-a2b6-58207021cf39","Type":"ContainerStarted","Data":"3aaf0fd1a4bde139bec9ecafdb98fdbc6850a5fbd7865fad5ec1b81ff4c2d01e"} Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.347925 4884 generic.go:334] "Generic (PLEG): container finished" podID="a2b91caf-ce98-4997-9f34-62031f4fb1f3" containerID="aad6c2d8163706af971580b4d4968667298cd895e74fb92dae8ce0070679cde3" exitCode=0 Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.348885 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" event={"ID":"a2b91caf-ce98-4997-9f34-62031f4fb1f3","Type":"ContainerDied","Data":"aad6c2d8163706af971580b4d4968667298cd895e74fb92dae8ce0070679cde3"} Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.413641 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.450184 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vjbm\" (UniqueName: \"kubernetes.io/projected/a2b91caf-ce98-4997-9f34-62031f4fb1f3-kube-api-access-9vjbm\") pod \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\" (UID: \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\") " Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.450569 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2b91caf-ce98-4997-9f34-62031f4fb1f3-dns-svc\") pod \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\" (UID: \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\") " Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.450682 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2b91caf-ce98-4997-9f34-62031f4fb1f3-config\") pod \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\" (UID: \"a2b91caf-ce98-4997-9f34-62031f4fb1f3\") " Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.459637 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2b91caf-ce98-4997-9f34-62031f4fb1f3-kube-api-access-9vjbm" (OuterVolumeSpecName: "kube-api-access-9vjbm") pod "a2b91caf-ce98-4997-9f34-62031f4fb1f3" (UID: "a2b91caf-ce98-4997-9f34-62031f4fb1f3"). InnerVolumeSpecName "kube-api-access-9vjbm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.506156 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2b91caf-ce98-4997-9f34-62031f4fb1f3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a2b91caf-ce98-4997-9f34-62031f4fb1f3" (UID: "a2b91caf-ce98-4997-9f34-62031f4fb1f3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.517107 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2b91caf-ce98-4997-9f34-62031f4fb1f3-config" (OuterVolumeSpecName: "config") pod "a2b91caf-ce98-4997-9f34-62031f4fb1f3" (UID: "a2b91caf-ce98-4997-9f34-62031f4fb1f3"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.552802 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2b91caf-ce98-4997-9f34-62031f4fb1f3-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.552836 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2b91caf-ce98-4997-9f34-62031f4fb1f3-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.552851 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vjbm\" (UniqueName: \"kubernetes.io/projected/a2b91caf-ce98-4997-9f34-62031f4fb1f3-kube-api-access-9vjbm\") on node \"crc\" DevicePath \"\"" Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.961985 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-drtcs"] Dec 10 00:53:56 crc kubenswrapper[4884]: E1210 00:53:56.962344 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2b91caf-ce98-4997-9f34-62031f4fb1f3" containerName="dnsmasq-dns" Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.962368 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2b91caf-ce98-4997-9f34-62031f4fb1f3" containerName="dnsmasq-dns" Dec 10 00:53:56 crc kubenswrapper[4884]: E1210 00:53:56.962380 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2b91caf-ce98-4997-9f34-62031f4fb1f3" containerName="init" Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.962387 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2b91caf-ce98-4997-9f34-62031f4fb1f3" containerName="init" Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.962616 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2b91caf-ce98-4997-9f34-62031f4fb1f3" containerName="dnsmasq-dns" Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.970767 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:56 crc kubenswrapper[4884]: I1210 00:53:56.971036 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-drtcs"] Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.067252 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.067619 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.067915 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.067983 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhwnl\" (UniqueName: \"kubernetes.io/projected/20a98971-47d1-49fe-b09b-84acaa9b4c6f-kube-api-access-bhwnl\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.068015 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-config\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.170236 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.170313 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhwnl\" (UniqueName: \"kubernetes.io/projected/20a98971-47d1-49fe-b09b-84acaa9b4c6f-kube-api-access-bhwnl\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.170341 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-config\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.170404 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" 
(UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.170503 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.171549 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.173409 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.173526 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.172354 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-config\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.195420 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhwnl\" (UniqueName: \"kubernetes.io/projected/20a98971-47d1-49fe-b09b-84acaa9b4c6f-kube-api-access-bhwnl\") pod \"dnsmasq-dns-b8fbc5445-drtcs\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.295043 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.362870 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d089c5ef-75b6-480e-b726-abd349a291cc","Type":"ContainerStarted","Data":"9b888f96e10346f125bc6ebb54a3d65e3f15428017766169357ea5bf609a6a36"} Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.365526 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"30c1a0f0-5abf-4fac-89c9-afedea695fab","Type":"ContainerStarted","Data":"6c541d493db47c2905cbf64d221dc9713e7e0bc75458e8d1e34a4e3e901e329b"} Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.367374 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-577tv" event={"ID":"b040b499-55d1-4173-bcfe-8e0100eed4b0","Type":"ContainerStarted","Data":"aac96a1c221a6e277a99bfd99b119c230ed818f52a21b334f4364a9b92082263"} Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.367622 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-577tv" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.369543 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" event={"ID":"a2b91caf-ce98-4997-9f34-62031f4fb1f3","Type":"ContainerDied","Data":"6463d3ffa03ddefb78b154759f3108a9df5020edf81b9a53838e41f8cd5949ab"} Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.369589 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-m2f9c" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.369590 4884 scope.go:117] "RemoveContainer" containerID="aad6c2d8163706af971580b4d4968667298cd895e74fb92dae8ce0070679cde3" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.459063 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-577tv" podStartSLOduration=8.947676175 podStartE2EDuration="36.459047037s" podCreationTimestamp="2025-12-10 00:53:21 +0000 UTC" firstStartedPulling="2025-12-10 00:53:28.449207603 +0000 UTC m=+1381.527164720" lastFinishedPulling="2025-12-10 00:53:55.960578465 +0000 UTC m=+1409.038535582" observedRunningTime="2025-12-10 00:53:57.440990921 +0000 UTC m=+1410.518948058" watchObservedRunningTime="2025-12-10 00:53:57.459047037 +0000 UTC m=+1410.537004154" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.472008 4884 scope.go:117] "RemoveContainer" containerID="030ba65eba5b832b47303b44ea1a23a6407601b5b6809cecd1da6728cf95c7c0" Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.495917 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-m2f9c"] Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.510821 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-m2f9c"] Dec 10 00:53:57 crc kubenswrapper[4884]: I1210 00:53:57.848303 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-drtcs"] Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.105457 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.111328 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.113283 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.113490 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-674z4" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.113681 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.114255 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.137360 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.289839 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/77e9a322-ea03-4101-b8be-d1e09f67e8c2-cache\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.289899 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.290051 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86s2r\" (UniqueName: \"kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-kube-api-access-86s2r\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.290130 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.290178 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/77e9a322-ea03-4101-b8be-d1e09f67e8c2-lock\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.396103 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.396168 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/77e9a322-ea03-4101-b8be-d1e09f67e8c2-lock\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.396224 4884 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/77e9a322-ea03-4101-b8be-d1e09f67e8c2-cache\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.396273 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.396357 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86s2r\" (UniqueName: \"kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-kube-api-access-86s2r\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.396602 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: E1210 00:53:58.396276 4884 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 00:53:58 crc kubenswrapper[4884]: E1210 00:53:58.396666 4884 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 00:53:58 crc kubenswrapper[4884]: E1210 00:53:58.396704 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift podName:77e9a322-ea03-4101-b8be-d1e09f67e8c2 nodeName:}" failed. No retries permitted until 2025-12-10 00:53:58.896690396 +0000 UTC m=+1411.974647513 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift") pod "swift-storage-0" (UID: "77e9a322-ea03-4101-b8be-d1e09f67e8c2") : configmap "swift-ring-files" not found Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.396742 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/77e9a322-ea03-4101-b8be-d1e09f67e8c2-cache\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.396803 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/77e9a322-ea03-4101-b8be-d1e09f67e8c2-lock\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.430478 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" event={"ID":"20a98971-47d1-49fe-b09b-84acaa9b4c6f","Type":"ContainerStarted","Data":"c8aeefece315aa386c865813cc08d7fb49694b7dd73f37f834162e36c012a2d1"} Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.443704 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.463480 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86s2r\" (UniqueName: \"kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-kube-api-access-86s2r\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.677282 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-t7f78"] Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.678898 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.682910 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.683176 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.686188 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.689225 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-t7f78"] Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.803011 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-combined-ca-bundle\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.803052 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-swiftconf\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.803072 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vx6mb\" (UniqueName: \"kubernetes.io/projected/e96bb397-11f0-4e24-aafc-9f399d5846b8-kube-api-access-vx6mb\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.803264 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e96bb397-11f0-4e24-aafc-9f399d5846b8-ring-data-devices\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.803301 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-dispersionconf\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.803544 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e96bb397-11f0-4e24-aafc-9f399d5846b8-etc-swift\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.803644 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e96bb397-11f0-4e24-aafc-9f399d5846b8-scripts\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 
00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.905245 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e96bb397-11f0-4e24-aafc-9f399d5846b8-ring-data-devices\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.905333 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-dispersionconf\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.905453 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e96bb397-11f0-4e24-aafc-9f399d5846b8-etc-swift\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.905494 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e96bb397-11f0-4e24-aafc-9f399d5846b8-scripts\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.905533 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-combined-ca-bundle\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.905555 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-swiftconf\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.905570 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vx6mb\" (UniqueName: \"kubernetes.io/projected/e96bb397-11f0-4e24-aafc-9f399d5846b8-kube-api-access-vx6mb\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.905603 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:58 crc kubenswrapper[4884]: E1210 00:53:58.905758 4884 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 00:53:58 crc kubenswrapper[4884]: E1210 00:53:58.905778 4884 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 00:53:58 crc kubenswrapper[4884]: E1210 00:53:58.905820 4884 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift podName:77e9a322-ea03-4101-b8be-d1e09f67e8c2 nodeName:}" failed. No retries permitted until 2025-12-10 00:53:59.905805977 +0000 UTC m=+1412.983763094 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift") pod "swift-storage-0" (UID: "77e9a322-ea03-4101-b8be-d1e09f67e8c2") : configmap "swift-ring-files" not found Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.906653 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e96bb397-11f0-4e24-aafc-9f399d5846b8-ring-data-devices\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.909033 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e96bb397-11f0-4e24-aafc-9f399d5846b8-etc-swift\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.909392 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e96bb397-11f0-4e24-aafc-9f399d5846b8-scripts\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.912022 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-combined-ca-bundle\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.912536 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-swiftconf\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.913417 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-dispersionconf\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.924084 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vx6mb\" (UniqueName: \"kubernetes.io/projected/e96bb397-11f0-4e24-aafc-9f399d5846b8-kube-api-access-vx6mb\") pod \"swift-ring-rebalance-t7f78\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:58 crc kubenswrapper[4884]: I1210 00:53:58.997289 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:53:59 crc kubenswrapper[4884]: I1210 00:53:59.299372 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2b91caf-ce98-4997-9f34-62031f4fb1f3" path="/var/lib/kubelet/pods/a2b91caf-ce98-4997-9f34-62031f4fb1f3/volumes" Dec 10 00:53:59 crc kubenswrapper[4884]: I1210 00:53:59.437677 4884 generic.go:334] "Generic (PLEG): container finished" podID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" containerID="aeb7b564e24d5cfae85a54d26d75dea336db4620f5dd8a333580ed56343f978d" exitCode=0 Dec 10 00:53:59 crc kubenswrapper[4884]: I1210 00:53:59.437775 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" event={"ID":"20a98971-47d1-49fe-b09b-84acaa9b4c6f","Type":"ContainerDied","Data":"aeb7b564e24d5cfae85a54d26d75dea336db4620f5dd8a333580ed56343f978d"} Dec 10 00:53:59 crc kubenswrapper[4884]: I1210 00:53:59.440312 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"785aab89-2566-4c6f-a2b6-58207021cf39","Type":"ContainerStarted","Data":"66d59c981181caaf19f0b50b5058b4f5a572ddccb4f8873602e849c8cce4b336"} Dec 10 00:53:59 crc kubenswrapper[4884]: I1210 00:53:59.440352 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"785aab89-2566-4c6f-a2b6-58207021cf39","Type":"ContainerStarted","Data":"8f300a6cb41872a56eeae1afc895c9e1ca63d44ecacac499447b9b747f934d39"} Dec 10 00:53:59 crc kubenswrapper[4884]: I1210 00:53:59.440479 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Dec 10 00:53:59 crc kubenswrapper[4884]: I1210 00:53:59.487708 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-t7f78"] Dec 10 00:53:59 crc kubenswrapper[4884]: I1210 00:53:59.496623 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.920140275 podStartE2EDuration="5.496601398s" podCreationTimestamp="2025-12-10 00:53:54 +0000 UTC" firstStartedPulling="2025-12-10 00:53:55.79305846 +0000 UTC m=+1408.871015577" lastFinishedPulling="2025-12-10 00:53:58.369519593 +0000 UTC m=+1411.447476700" observedRunningTime="2025-12-10 00:53:59.477725689 +0000 UTC m=+1412.555682856" watchObservedRunningTime="2025-12-10 00:53:59.496601398 +0000 UTC m=+1412.574558515" Dec 10 00:53:59 crc kubenswrapper[4884]: I1210 00:53:59.930589 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:53:59 crc kubenswrapper[4884]: E1210 00:53:59.930801 4884 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 00:53:59 crc kubenswrapper[4884]: E1210 00:53:59.930895 4884 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 00:53:59 crc kubenswrapper[4884]: E1210 00:53:59.930950 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift podName:77e9a322-ea03-4101-b8be-d1e09f67e8c2 nodeName:}" failed. No retries permitted until 2025-12-10 00:54:01.930933113 +0000 UTC m=+1415.008890230 (durationBeforeRetry 2s). 
Dec 10 00:54:00 crc kubenswrapper[4884]: I1210 00:54:00.458911 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" event={"ID":"20a98971-47d1-49fe-b09b-84acaa9b4c6f","Type":"ContainerStarted","Data":"2913a7ae2ba1c8a32103b08466fd0c5b5f862ef57ed46214644ecbc07a71aa9a"}
Dec 10 00:54:00 crc kubenswrapper[4884]: I1210 00:54:00.459013 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs"
Dec 10 00:54:00 crc kubenswrapper[4884]: I1210 00:54:00.462507 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-t7f78" event={"ID":"e96bb397-11f0-4e24-aafc-9f399d5846b8","Type":"ContainerStarted","Data":"14c840a51ebecf098163fb4c71b025e5de74ce2c3031c93feb23a7b23c82ed7b"}
Dec 10 00:54:00 crc kubenswrapper[4884]: I1210 00:54:00.489476 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" podStartSLOduration=4.489421293 podStartE2EDuration="4.489421293s" podCreationTimestamp="2025-12-10 00:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:54:00.48371915 +0000 UTC m=+1413.561676267" watchObservedRunningTime="2025-12-10 00:54:00.489421293 +0000 UTC m=+1413.567378410"
Dec 10 00:54:01 crc kubenswrapper[4884]: I1210 00:54:01.475888 4884 generic.go:334] "Generic (PLEG): container finished" podID="30c1a0f0-5abf-4fac-89c9-afedea695fab" containerID="6c541d493db47c2905cbf64d221dc9713e7e0bc75458e8d1e34a4e3e901e329b" exitCode=0
Dec 10 00:54:01 crc kubenswrapper[4884]: I1210 00:54:01.475989 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"30c1a0f0-5abf-4fac-89c9-afedea695fab","Type":"ContainerDied","Data":"6c541d493db47c2905cbf64d221dc9713e7e0bc75458e8d1e34a4e3e901e329b"}
Dec 10 00:54:01 crc kubenswrapper[4884]: I1210 00:54:01.491274 4884 generic.go:334] "Generic (PLEG): container finished" podID="d089c5ef-75b6-480e-b726-abd349a291cc" containerID="9b888f96e10346f125bc6ebb54a3d65e3f15428017766169357ea5bf609a6a36" exitCode=0
Dec 10 00:54:01 crc kubenswrapper[4884]: I1210 00:54:01.492404 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d089c5ef-75b6-480e-b726-abd349a291cc","Type":"ContainerDied","Data":"9b888f96e10346f125bc6ebb54a3d65e3f15428017766169357ea5bf609a6a36"}
Dec 10 00:54:01 crc kubenswrapper[4884]: I1210 00:54:01.967764 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0"
Dec 10 00:54:01 crc kubenswrapper[4884]: E1210 00:54:01.967936 4884 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Dec 10 00:54:01 crc kubenswrapper[4884]: E1210 00:54:01.967968 4884 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Dec 10 00:54:01 crc kubenswrapper[4884]: E1210 00:54:01.968027 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift podName:77e9a322-ea03-4101-b8be-d1e09f67e8c2 nodeName:}" failed. No retries permitted until 2025-12-10 00:54:05.96800831 +0000 UTC m=+1419.045965427 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift") pod "swift-storage-0" (UID: "77e9a322-ea03-4101-b8be-d1e09f67e8c2") : configmap "swift-ring-files" not found
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.168663 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-fb8bc9c44-vbbc8" podUID="a6857870-023d-4183-9ab2-7c582110e7ad" containerName="console" containerID="cri-o://183b2f18ab1f60e64e0c910ceaf14a8e412fa52ee5708efd014b957d18517446" gracePeriod=15
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.515113 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"23f4050b-2338-4211-9ff1-0997060904f8","Type":"ContainerStarted","Data":"002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5"}
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.517736 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d089c5ef-75b6-480e-b726-abd349a291cc","Type":"ContainerStarted","Data":"1a7d75952b96b4d3553883203c7a10949f1749f08149afda39c283e5322fc99d"}
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.519914 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"30c1a0f0-5abf-4fac-89c9-afedea695fab","Type":"ContainerStarted","Data":"4e51ba47275bab81ad76cc8b726a08f9103545bd788e1d7b8e4a5f1e0b70bc46"}
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.522097 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-fb8bc9c44-vbbc8_a6857870-023d-4183-9ab2-7c582110e7ad/console/0.log"
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.522144 4884 generic.go:334] "Generic (PLEG): container finished" podID="a6857870-023d-4183-9ab2-7c582110e7ad" containerID="183b2f18ab1f60e64e0c910ceaf14a8e412fa52ee5708efd014b957d18517446" exitCode=2
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.522167 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-fb8bc9c44-vbbc8" event={"ID":"a6857870-023d-4183-9ab2-7c582110e7ad","Type":"ContainerDied","Data":"183b2f18ab1f60e64e0c910ceaf14a8e412fa52ee5708efd014b957d18517446"}
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.566669 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=24.966002125 podStartE2EDuration="52.566649553s" podCreationTimestamp="2025-12-10 00:53:11 +0000 UTC" firstStartedPulling="2025-12-10 00:53:28.361496319 +0000 UTC m=+1381.439453436" lastFinishedPulling="2025-12-10 00:53:55.962143747 +0000 UTC m=+1409.040100864" observedRunningTime="2025-12-10 00:54:03.566335435 +0000 UTC m=+1416.644292572" watchObservedRunningTime="2025-12-10 00:54:03.566649553 +0000 UTC m=+1416.644606670"
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.575196 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371986.279606 podStartE2EDuration="50.575170093s" podCreationTimestamp="2025-12-10 00:53:13 +0000 UTC" firstStartedPulling="2025-12-10 00:53:28.3630353 +0000 UTC m=+1381.440992417" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:54:03.545626506 +0000 UTC m=+1416.623583623" watchObservedRunningTime="2025-12-10 00:54:03.575170093 +0000 UTC m=+1416.653127210"
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.654507 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-fb8bc9c44-vbbc8_a6857870-023d-4183-9ab2-7c582110e7ad/console/0.log"
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.654580 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-fb8bc9c44-vbbc8"
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.803232 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-oauth-serving-cert\") pod \"a6857870-023d-4183-9ab2-7c582110e7ad\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") "
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.803310 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-trusted-ca-bundle\") pod \"a6857870-023d-4183-9ab2-7c582110e7ad\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") "
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.803330 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a6857870-023d-4183-9ab2-7c582110e7ad-console-serving-cert\") pod \"a6857870-023d-4183-9ab2-7c582110e7ad\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") "
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.803358 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bm2ht\" (UniqueName: \"kubernetes.io/projected/a6857870-023d-4183-9ab2-7c582110e7ad-kube-api-access-bm2ht\") pod \"a6857870-023d-4183-9ab2-7c582110e7ad\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") "
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.803464 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-console-config\") pod \"a6857870-023d-4183-9ab2-7c582110e7ad\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") "
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.803505 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-service-ca\") pod \"a6857870-023d-4183-9ab2-7c582110e7ad\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") "
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.804336 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-service-ca" (OuterVolumeSpecName: "service-ca") pod "a6857870-023d-4183-9ab2-7c582110e7ad" (UID: "a6857870-023d-4183-9ab2-7c582110e7ad"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.804583 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a6857870-023d-4183-9ab2-7c582110e7ad-console-oauth-config\") pod \"a6857870-023d-4183-9ab2-7c582110e7ad\" (UID: \"a6857870-023d-4183-9ab2-7c582110e7ad\") "
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.804589 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "a6857870-023d-4183-9ab2-7c582110e7ad" (UID: "a6857870-023d-4183-9ab2-7c582110e7ad"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.804779 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-console-config" (OuterVolumeSpecName: "console-config") pod "a6857870-023d-4183-9ab2-7c582110e7ad" (UID: "a6857870-023d-4183-9ab2-7c582110e7ad"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.804882 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "a6857870-023d-4183-9ab2-7c582110e7ad" (UID: "a6857870-023d-4183-9ab2-7c582110e7ad"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.805599 4884 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-console-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.805619 4884 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-service-ca\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.805627 4884 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.805636 4884 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a6857870-023d-4183-9ab2-7c582110e7ad-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.810200 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6857870-023d-4183-9ab2-7c582110e7ad-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "a6857870-023d-4183-9ab2-7c582110e7ad" (UID: "a6857870-023d-4183-9ab2-7c582110e7ad"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.810338 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6857870-023d-4183-9ab2-7c582110e7ad-kube-api-access-bm2ht" (OuterVolumeSpecName: "kube-api-access-bm2ht") pod "a6857870-023d-4183-9ab2-7c582110e7ad" (UID: "a6857870-023d-4183-9ab2-7c582110e7ad"). InnerVolumeSpecName "kube-api-access-bm2ht". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.810354 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6857870-023d-4183-9ab2-7c582110e7ad-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "a6857870-023d-4183-9ab2-7c582110e7ad" (UID: "a6857870-023d-4183-9ab2-7c582110e7ad"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.908186 4884 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a6857870-023d-4183-9ab2-7c582110e7ad-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.908599 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bm2ht\" (UniqueName: \"kubernetes.io/projected/a6857870-023d-4183-9ab2-7c582110e7ad-kube-api-access-bm2ht\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:03 crc kubenswrapper[4884]: I1210 00:54:03.908754 4884 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a6857870-023d-4183-9ab2-7c582110e7ad-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:04 crc kubenswrapper[4884]: I1210 00:54:04.413637 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Dec 10 00:54:04 crc kubenswrapper[4884]: I1210 00:54:04.413900 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Dec 10 00:54:04 crc kubenswrapper[4884]: I1210 00:54:04.532679 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-fb8bc9c44-vbbc8_a6857870-023d-4183-9ab2-7c582110e7ad/console/0.log" Dec 10 00:54:04 crc kubenswrapper[4884]: I1210 00:54:04.532804 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-fb8bc9c44-vbbc8" event={"ID":"a6857870-023d-4183-9ab2-7c582110e7ad","Type":"ContainerDied","Data":"17f07a63d2bcec616ccf04ee3c3c4359aecd73b2a83dc851cf61c2aca3fc0dbf"} Dec 10 00:54:04 crc kubenswrapper[4884]: I1210 00:54:04.532854 4884 scope.go:117] "RemoveContainer" containerID="183b2f18ab1f60e64e0c910ceaf14a8e412fa52ee5708efd014b957d18517446" Dec 10 00:54:04 crc kubenswrapper[4884]: I1210 00:54:04.532866 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-fb8bc9c44-vbbc8" Dec 10 00:54:04 crc kubenswrapper[4884]: I1210 00:54:04.572918 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-fb8bc9c44-vbbc8"] Dec 10 00:54:04 crc kubenswrapper[4884]: I1210 00:54:04.583634 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-fb8bc9c44-vbbc8"] Dec 10 00:54:05 crc kubenswrapper[4884]: I1210 00:54:05.305523 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6857870-023d-4183-9ab2-7c582110e7ad" path="/var/lib/kubelet/pods/a6857870-023d-4183-9ab2-7c582110e7ad/volumes" Dec 10 00:54:06 crc kubenswrapper[4884]: I1210 00:54:06.065792 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:54:06 crc kubenswrapper[4884]: E1210 00:54:06.065984 4884 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 00:54:06 crc kubenswrapper[4884]: E1210 00:54:06.066127 4884 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 00:54:06 crc kubenswrapper[4884]: E1210 00:54:06.066192 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift podName:77e9a322-ea03-4101-b8be-d1e09f67e8c2 nodeName:}" failed. No retries permitted until 2025-12-10 00:54:14.066173653 +0000 UTC m=+1427.144130770 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift") pod "swift-storage-0" (UID: "77e9a322-ea03-4101-b8be-d1e09f67e8c2") : configmap "swift-ring-files" not found Dec 10 00:54:06 crc kubenswrapper[4884]: I1210 00:54:06.551401 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"23f4050b-2338-4211-9ff1-0997060904f8","Type":"ContainerStarted","Data":"46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c"} Dec 10 00:54:07 crc kubenswrapper[4884]: I1210 00:54:07.081389 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 10 00:54:07 crc kubenswrapper[4884]: I1210 00:54:07.334657 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:54:07 crc kubenswrapper[4884]: I1210 00:54:07.394136 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-plqg7"] Dec 10 00:54:07 crc kubenswrapper[4884]: I1210 00:54:07.394342 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-plqg7" podUID="0f550bfc-1f18-4b79-85e0-420dc9f852c1" containerName="dnsmasq-dns" containerID="cri-o://41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2" gracePeriod=10 Dec 10 00:54:10 crc kubenswrapper[4884]: I1210 00:54:10.253814 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Dec 10 00:54:10 crc kubenswrapper[4884]: I1210 00:54:10.636204 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8554648995-plqg7" 
podUID="0f550bfc-1f18-4b79-85e0-420dc9f852c1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.134:5353: connect: connection refused" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.172226 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.274581 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5m59\" (UniqueName: \"kubernetes.io/projected/0f550bfc-1f18-4b79-85e0-420dc9f852c1-kube-api-access-z5m59\") pod \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.274656 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-ovsdbserver-sb\") pod \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.274786 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-config\") pod \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.274839 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-dns-svc\") pod \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.274862 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-ovsdbserver-nb\") pod \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\" (UID: \"0f550bfc-1f18-4b79-85e0-420dc9f852c1\") " Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.292787 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f550bfc-1f18-4b79-85e0-420dc9f852c1-kube-api-access-z5m59" (OuterVolumeSpecName: "kube-api-access-z5m59") pod "0f550bfc-1f18-4b79-85e0-420dc9f852c1" (UID: "0f550bfc-1f18-4b79-85e0-420dc9f852c1"). InnerVolumeSpecName "kube-api-access-z5m59". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.326219 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0f550bfc-1f18-4b79-85e0-420dc9f852c1" (UID: "0f550bfc-1f18-4b79-85e0-420dc9f852c1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.329049 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0f550bfc-1f18-4b79-85e0-420dc9f852c1" (UID: "0f550bfc-1f18-4b79-85e0-420dc9f852c1"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.337365 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-config" (OuterVolumeSpecName: "config") pod "0f550bfc-1f18-4b79-85e0-420dc9f852c1" (UID: "0f550bfc-1f18-4b79-85e0-420dc9f852c1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.340393 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0f550bfc-1f18-4b79-85e0-420dc9f852c1" (UID: "0f550bfc-1f18-4b79-85e0-420dc9f852c1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.376832 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.377575 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.377589 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.377603 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5m59\" (UniqueName: \"kubernetes.io/projected/0f550bfc-1f18-4b79-85e0-420dc9f852c1-kube-api-access-z5m59\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.377616 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f550bfc-1f18-4b79-85e0-420dc9f852c1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.613187 4884 generic.go:334] "Generic (PLEG): container finished" podID="0f550bfc-1f18-4b79-85e0-420dc9f852c1" containerID="41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2" exitCode=0 Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.613242 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-plqg7" event={"ID":"0f550bfc-1f18-4b79-85e0-420dc9f852c1","Type":"ContainerDied","Data":"41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2"} Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.613280 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-plqg7" event={"ID":"0f550bfc-1f18-4b79-85e0-420dc9f852c1","Type":"ContainerDied","Data":"81dfa229e1757be7be947c8328b741b5037d14a57d228587df642b35d34f1eec"} Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.613302 4884 scope.go:117] "RemoveContainer" containerID="41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.613403 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-plqg7" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.639566 4884 scope.go:117] "RemoveContainer" containerID="446e6fba334794991b09a382a3f205b9831a403528c007953ec237b50f2832ea" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.660922 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-plqg7"] Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.673358 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-plqg7"] Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.676171 4884 scope.go:117] "RemoveContainer" containerID="41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2" Dec 10 00:54:11 crc kubenswrapper[4884]: E1210 00:54:11.676677 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2\": container with ID starting with 41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2 not found: ID does not exist" containerID="41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.676725 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2"} err="failed to get container status \"41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2\": rpc error: code = NotFound desc = could not find container \"41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2\": container with ID starting with 41d198bf6377f004fa7d702bb4283dfbdcbe82408d3fd1f2159888654ad8ade2 not found: ID does not exist" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.676757 4884 scope.go:117] "RemoveContainer" containerID="446e6fba334794991b09a382a3f205b9831a403528c007953ec237b50f2832ea" Dec 10 00:54:11 crc kubenswrapper[4884]: E1210 00:54:11.677251 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"446e6fba334794991b09a382a3f205b9831a403528c007953ec237b50f2832ea\": container with ID starting with 446e6fba334794991b09a382a3f205b9831a403528c007953ec237b50f2832ea not found: ID does not exist" containerID="446e6fba334794991b09a382a3f205b9831a403528c007953ec237b50f2832ea" Dec 10 00:54:11 crc kubenswrapper[4884]: I1210 00:54:11.677303 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"446e6fba334794991b09a382a3f205b9831a403528c007953ec237b50f2832ea"} err="failed to get container status \"446e6fba334794991b09a382a3f205b9831a403528c007953ec237b50f2832ea\": rpc error: code = NotFound desc = could not find container \"446e6fba334794991b09a382a3f205b9831a403528c007953ec237b50f2832ea\": container with ID starting with 446e6fba334794991b09a382a3f205b9831a403528c007953ec237b50f2832ea not found: ID does not exist" Dec 10 00:54:12 crc kubenswrapper[4884]: I1210 00:54:12.104581 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Dec 10 00:54:12 crc kubenswrapper[4884]: I1210 00:54:12.180028 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Dec 10 00:54:12 crc kubenswrapper[4884]: I1210 00:54:12.627953 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/swift-ring-rebalance-t7f78" event={"ID":"e96bb397-11f0-4e24-aafc-9f399d5846b8","Type":"ContainerStarted","Data":"8c99ce683f4c01518266f10789886ba4f1cb34ec20fa0d439637bdc1c2f51c7d"} Dec 10 00:54:12 crc kubenswrapper[4884]: I1210 00:54:12.651041 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-t7f78" podStartSLOduration=2.358574307 podStartE2EDuration="14.651019199s" podCreationTimestamp="2025-12-10 00:53:58 +0000 UTC" firstStartedPulling="2025-12-10 00:53:59.506280599 +0000 UTC m=+1412.584237716" lastFinishedPulling="2025-12-10 00:54:11.798725491 +0000 UTC m=+1424.876682608" observedRunningTime="2025-12-10 00:54:12.650023363 +0000 UTC m=+1425.727980530" watchObservedRunningTime="2025-12-10 00:54:12.651019199 +0000 UTC m=+1425.728976326" Dec 10 00:54:13 crc kubenswrapper[4884]: I1210 00:54:13.041999 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Dec 10 00:54:13 crc kubenswrapper[4884]: I1210 00:54:13.042057 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Dec 10 00:54:13 crc kubenswrapper[4884]: I1210 00:54:13.297880 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f550bfc-1f18-4b79-85e0-420dc9f852c1" path="/var/lib/kubelet/pods/0f550bfc-1f18-4b79-85e0-420dc9f852c1/volumes" Dec 10 00:54:14 crc kubenswrapper[4884]: I1210 00:54:14.142605 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:54:14 crc kubenswrapper[4884]: E1210 00:54:14.142819 4884 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 10 00:54:14 crc kubenswrapper[4884]: E1210 00:54:14.143085 4884 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 10 00:54:14 crc kubenswrapper[4884]: E1210 00:54:14.143156 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift podName:77e9a322-ea03-4101-b8be-d1e09f67e8c2 nodeName:}" failed. No retries permitted until 2025-12-10 00:54:30.143135871 +0000 UTC m=+1443.221092998 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift") pod "swift-storage-0" (UID: "77e9a322-ea03-4101-b8be-d1e09f67e8c2") : configmap "swift-ring-files" not found Dec 10 00:54:14 crc kubenswrapper[4884]: I1210 00:54:14.644994 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"23f4050b-2338-4211-9ff1-0997060904f8","Type":"ContainerStarted","Data":"a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7"} Dec 10 00:54:14 crc kubenswrapper[4884]: I1210 00:54:14.681385 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=13.073678182 podStartE2EDuration="58.681360336s" podCreationTimestamp="2025-12-10 00:53:16 +0000 UTC" firstStartedPulling="2025-12-10 00:53:28.405913616 +0000 UTC m=+1381.483870733" lastFinishedPulling="2025-12-10 00:54:14.01359577 +0000 UTC m=+1427.091552887" observedRunningTime="2025-12-10 00:54:14.671278475 +0000 UTC m=+1427.749235602" watchObservedRunningTime="2025-12-10 00:54:14.681360336 +0000 UTC m=+1427.759317463" Dec 10 00:54:15 crc kubenswrapper[4884]: I1210 00:54:15.240116 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Dec 10 00:54:15 crc kubenswrapper[4884]: I1210 00:54:15.343510 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.582418 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.937417 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-pgk6x"] Dec 10 00:54:16 crc kubenswrapper[4884]: E1210 00:54:16.938074 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f550bfc-1f18-4b79-85e0-420dc9f852c1" containerName="dnsmasq-dns" Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.938090 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f550bfc-1f18-4b79-85e0-420dc9f852c1" containerName="dnsmasq-dns" Dec 10 00:54:16 crc kubenswrapper[4884]: E1210 00:54:16.938106 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f550bfc-1f18-4b79-85e0-420dc9f852c1" containerName="init" Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.938113 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f550bfc-1f18-4b79-85e0-420dc9f852c1" containerName="init" Dec 10 00:54:16 crc kubenswrapper[4884]: E1210 00:54:16.938122 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6857870-023d-4183-9ab2-7c582110e7ad" containerName="console" Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.938128 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6857870-023d-4183-9ab2-7c582110e7ad" containerName="console" Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.938301 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6857870-023d-4183-9ab2-7c582110e7ad" containerName="console" Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.938323 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f550bfc-1f18-4b79-85e0-420dc9f852c1" containerName="dnsmasq-dns" Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.939004 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.943922 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-07ac-account-create-update-vs7v2"] Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.945132 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.947052 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-db-secret" Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.958240 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-pgk6x"] Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.966197 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-07ac-account-create-update-vs7v2"] Dec 10 00:54:16 crc kubenswrapper[4884]: I1210 00:54:16.997219 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3-operator-scripts\") pod \"mysqld-exporter-07ac-account-create-update-vs7v2\" (UID: \"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3\") " pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.000329 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11394f81-f238-4929-8c40-4c279b8b39dc-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-pgk6x\" (UID: \"11394f81-f238-4929-8c40-4c279b8b39dc\") " pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.000419 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcp4g\" (UniqueName: \"kubernetes.io/projected/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3-kube-api-access-fcp4g\") pod \"mysqld-exporter-07ac-account-create-update-vs7v2\" (UID: \"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3\") " pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.000461 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb2qb\" (UniqueName: \"kubernetes.io/projected/11394f81-f238-4929-8c40-4c279b8b39dc-kube-api-access-nb2qb\") pod \"mysqld-exporter-openstack-db-create-pgk6x\" (UID: \"11394f81-f238-4929-8c40-4c279b8b39dc\") " pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.102084 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3-operator-scripts\") pod \"mysqld-exporter-07ac-account-create-update-vs7v2\" (UID: \"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3\") " pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.102175 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11394f81-f238-4929-8c40-4c279b8b39dc-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-pgk6x\" 
(UID: \"11394f81-f238-4929-8c40-4c279b8b39dc\") " pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.102213 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcp4g\" (UniqueName: \"kubernetes.io/projected/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3-kube-api-access-fcp4g\") pod \"mysqld-exporter-07ac-account-create-update-vs7v2\" (UID: \"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3\") " pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.102231 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb2qb\" (UniqueName: \"kubernetes.io/projected/11394f81-f238-4929-8c40-4c279b8b39dc-kube-api-access-nb2qb\") pod \"mysqld-exporter-openstack-db-create-pgk6x\" (UID: \"11394f81-f238-4929-8c40-4c279b8b39dc\") " pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.103215 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3-operator-scripts\") pod \"mysqld-exporter-07ac-account-create-update-vs7v2\" (UID: \"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3\") " pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.103697 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11394f81-f238-4929-8c40-4c279b8b39dc-operator-scripts\") pod \"mysqld-exporter-openstack-db-create-pgk6x\" (UID: \"11394f81-f238-4929-8c40-4c279b8b39dc\") " pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.120079 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb2qb\" (UniqueName: \"kubernetes.io/projected/11394f81-f238-4929-8c40-4c279b8b39dc-kube-api-access-nb2qb\") pod \"mysqld-exporter-openstack-db-create-pgk6x\" (UID: \"11394f81-f238-4929-8c40-4c279b8b39dc\") " pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.120518 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcp4g\" (UniqueName: \"kubernetes.io/projected/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3-kube-api-access-fcp4g\") pod \"mysqld-exporter-07ac-account-create-update-vs7v2\" (UID: \"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3\") " pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.259404 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.266863 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.671643 4884 generic.go:334] "Generic (PLEG): container finished" podID="7343006c-fda9-4e2d-8767-41ee4412c601" containerID="8e10a63d7c4265ae6e3042c0e1fd366e4fec44c8460e8ef66d0de06d49221460" exitCode=0 Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.672129 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7343006c-fda9-4e2d-8767-41ee4412c601","Type":"ContainerDied","Data":"8e10a63d7c4265ae6e3042c0e1fd366e4fec44c8460e8ef66d0de06d49221460"} Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.684648 4884 generic.go:334] "Generic (PLEG): container finished" podID="6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" containerID="4e3d15fd0e5ae08056b22ae7da7e8668a86ba45b7eaea66648647fc03028403b" exitCode=0 Dec 10 00:54:17 crc kubenswrapper[4884]: I1210 00:54:17.684696 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93","Type":"ContainerDied","Data":"4e3d15fd0e5ae08056b22ae7da7e8668a86ba45b7eaea66648647fc03028403b"} Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.048004 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-07ac-account-create-update-vs7v2"] Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.057001 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-pgk6x"] Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.300952 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.301007 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.303710 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.698164 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7343006c-fda9-4e2d-8767-41ee4412c601","Type":"ContainerStarted","Data":"78981e56c620a6650251f282dc2bc53d7cb907802cb58320c3a88eefc671b3cd"} Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.698444 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.700487 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" event={"ID":"11394f81-f238-4929-8c40-4c279b8b39dc","Type":"ContainerStarted","Data":"a79500e8ce470f15597df7aa0276a4db505c17ec9898e30349889d091c57be6c"} Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.700529 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" event={"ID":"11394f81-f238-4929-8c40-4c279b8b39dc","Type":"ContainerStarted","Data":"6f69d8f8916a8b2f45941de90653fa7341a5d88ff48d9fd125939bcd642f7584"} Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.703210 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93","Type":"ContainerStarted","Data":"80aff531440e5df8fdf3ed19c2426ac5174da004004613a3abf3fa9c7b5e565d"} Dec 10 
00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.703382 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.706044 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" event={"ID":"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3","Type":"ContainerStarted","Data":"81d90abbac07409cefd4c4887221da72b4c59103116572fb6fd34353d62b7d64"} Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.706082 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" event={"ID":"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3","Type":"ContainerStarted","Data":"bc02b80c85d8bdf0b3f4ad42c2b70b168978df7bdb81d76ca2db9164b4caaf16"} Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.706369 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.729173 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=54.059914712 podStartE2EDuration="1m8.729149778s" podCreationTimestamp="2025-12-10 00:53:10 +0000 UTC" firstStartedPulling="2025-12-10 00:53:27.839766778 +0000 UTC m=+1380.917723895" lastFinishedPulling="2025-12-10 00:53:42.509001844 +0000 UTC m=+1395.586958961" observedRunningTime="2025-12-10 00:54:18.723020562 +0000 UTC m=+1431.800977689" watchObservedRunningTime="2025-12-10 00:54:18.729149778 +0000 UTC m=+1431.807106895" Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.752633 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=54.424212173 podStartE2EDuration="1m8.752615394s" podCreationTimestamp="2025-12-10 00:53:10 +0000 UTC" firstStartedPulling="2025-12-10 00:53:27.741087939 +0000 UTC m=+1380.819045056" lastFinishedPulling="2025-12-10 00:53:42.06949116 +0000 UTC m=+1395.147448277" observedRunningTime="2025-12-10 00:54:18.752020457 +0000 UTC m=+1431.829977594" watchObservedRunningTime="2025-12-10 00:54:18.752615394 +0000 UTC m=+1431.830572511" Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.771533 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" podStartSLOduration=2.771518755 podStartE2EDuration="2.771518755s" podCreationTimestamp="2025-12-10 00:54:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:54:18.768898254 +0000 UTC m=+1431.846855371" watchObservedRunningTime="2025-12-10 00:54:18.771518755 +0000 UTC m=+1431.849475872" Dec 10 00:54:18 crc kubenswrapper[4884]: I1210 00:54:18.812740 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" podStartSLOduration=2.812726461 podStartE2EDuration="2.812726461s" podCreationTimestamp="2025-12-10 00:54:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:54:18.788830834 +0000 UTC m=+1431.866787951" watchObservedRunningTime="2025-12-10 00:54:18.812726461 +0000 UTC m=+1431.890683578" Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.712847 4884 generic.go:334] "Generic (PLEG): container finished" 
podID="1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3" containerID="81d90abbac07409cefd4c4887221da72b4c59103116572fb6fd34353d62b7d64" exitCode=0 Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.712905 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" event={"ID":"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3","Type":"ContainerDied","Data":"81d90abbac07409cefd4c4887221da72b4c59103116572fb6fd34353d62b7d64"} Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.714474 4884 generic.go:334] "Generic (PLEG): container finished" podID="11394f81-f238-4929-8c40-4c279b8b39dc" containerID="a79500e8ce470f15597df7aa0276a4db505c17ec9898e30349889d091c57be6c" exitCode=0 Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.714537 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" event={"ID":"11394f81-f238-4929-8c40-4c279b8b39dc","Type":"ContainerDied","Data":"a79500e8ce470f15597df7aa0276a4db505c17ec9898e30349889d091c57be6c"} Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.715881 4884 generic.go:334] "Generic (PLEG): container finished" podID="e96bb397-11f0-4e24-aafc-9f399d5846b8" containerID="8c99ce683f4c01518266f10789886ba4f1cb34ec20fa0d439637bdc1c2f51c7d" exitCode=0 Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.716038 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-t7f78" event={"ID":"e96bb397-11f0-4e24-aafc-9f399d5846b8","Type":"ContainerDied","Data":"8c99ce683f4c01518266f10789886ba4f1cb34ec20fa0d439637bdc1c2f51c7d"} Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.864245 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-sr25b"] Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.865701 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-sr25b" Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.894467 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-sr25b"] Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.908112 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8h6v\" (UniqueName: \"kubernetes.io/projected/624c66a0-e094-4c3e-94f8-efe258817479-kube-api-access-j8h6v\") pod \"glance-db-create-sr25b\" (UID: \"624c66a0-e094-4c3e-94f8-efe258817479\") " pod="openstack/glance-db-create-sr25b" Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.908203 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624c66a0-e094-4c3e-94f8-efe258817479-operator-scripts\") pod \"glance-db-create-sr25b\" (UID: \"624c66a0-e094-4c3e-94f8-efe258817479\") " pod="openstack/glance-db-create-sr25b" Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.973058 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-5a59-account-create-update-5pspx"] Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.974218 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-5a59-account-create-update-5pspx" Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.976281 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Dec 10 00:54:19 crc kubenswrapper[4884]: I1210 00:54:19.999844 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-5a59-account-create-update-5pspx"] Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.009737 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4cadd997-aad1-45be-9922-6e2805437098-operator-scripts\") pod \"glance-5a59-account-create-update-5pspx\" (UID: \"4cadd997-aad1-45be-9922-6e2805437098\") " pod="openstack/glance-5a59-account-create-update-5pspx" Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.009783 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tkkq\" (UniqueName: \"kubernetes.io/projected/4cadd997-aad1-45be-9922-6e2805437098-kube-api-access-9tkkq\") pod \"glance-5a59-account-create-update-5pspx\" (UID: \"4cadd997-aad1-45be-9922-6e2805437098\") " pod="openstack/glance-5a59-account-create-update-5pspx" Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.009908 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8h6v\" (UniqueName: \"kubernetes.io/projected/624c66a0-e094-4c3e-94f8-efe258817479-kube-api-access-j8h6v\") pod \"glance-db-create-sr25b\" (UID: \"624c66a0-e094-4c3e-94f8-efe258817479\") " pod="openstack/glance-db-create-sr25b" Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.009950 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624c66a0-e094-4c3e-94f8-efe258817479-operator-scripts\") pod \"glance-db-create-sr25b\" (UID: \"624c66a0-e094-4c3e-94f8-efe258817479\") " pod="openstack/glance-db-create-sr25b" Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.010591 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624c66a0-e094-4c3e-94f8-efe258817479-operator-scripts\") pod \"glance-db-create-sr25b\" (UID: \"624c66a0-e094-4c3e-94f8-efe258817479\") " pod="openstack/glance-db-create-sr25b" Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.048209 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8h6v\" (UniqueName: \"kubernetes.io/projected/624c66a0-e094-4c3e-94f8-efe258817479-kube-api-access-j8h6v\") pod \"glance-db-create-sr25b\" (UID: \"624c66a0-e094-4c3e-94f8-efe258817479\") " pod="openstack/glance-db-create-sr25b" Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.111921 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4cadd997-aad1-45be-9922-6e2805437098-operator-scripts\") pod \"glance-5a59-account-create-update-5pspx\" (UID: \"4cadd997-aad1-45be-9922-6e2805437098\") " pod="openstack/glance-5a59-account-create-update-5pspx" Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.112001 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tkkq\" (UniqueName: \"kubernetes.io/projected/4cadd997-aad1-45be-9922-6e2805437098-kube-api-access-9tkkq\") pod 
\"glance-5a59-account-create-update-5pspx\" (UID: \"4cadd997-aad1-45be-9922-6e2805437098\") " pod="openstack/glance-5a59-account-create-update-5pspx" Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.112562 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4cadd997-aad1-45be-9922-6e2805437098-operator-scripts\") pod \"glance-5a59-account-create-update-5pspx\" (UID: \"4cadd997-aad1-45be-9922-6e2805437098\") " pod="openstack/glance-5a59-account-create-update-5pspx" Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.175924 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tkkq\" (UniqueName: \"kubernetes.io/projected/4cadd997-aad1-45be-9922-6e2805437098-kube-api-access-9tkkq\") pod \"glance-5a59-account-create-update-5pspx\" (UID: \"4cadd997-aad1-45be-9922-6e2805437098\") " pod="openstack/glance-5a59-account-create-update-5pspx" Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.181148 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-sr25b" Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.287510 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5a59-account-create-update-5pspx" Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.318877 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.707763 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-sr25b"] Dec 10 00:54:20 crc kubenswrapper[4884]: W1210 00:54:20.709203 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod624c66a0_e094_4c3e_94f8_efe258817479.slice/crio-d1405c15401b0b575a880d7918c84b131536b784cbd2cd28559bf6bd032d9262 WatchSource:0}: Error finding container d1405c15401b0b575a880d7918c84b131536b784cbd2cd28559bf6bd032d9262: Status 404 returned error can't find the container with id d1405c15401b0b575a880d7918c84b131536b784cbd2cd28559bf6bd032d9262 Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.735223 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-sr25b" event={"ID":"624c66a0-e094-4c3e-94f8-efe258817479","Type":"ContainerStarted","Data":"d1405c15401b0b575a880d7918c84b131536b784cbd2cd28559bf6bd032d9262"} Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.735768 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="prometheus" containerID="cri-o://002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5" gracePeriod=600 Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.735895 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="config-reloader" containerID="cri-o://46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c" gracePeriod=600 Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.735879 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="thanos-sidecar" 
containerID="cri-o://a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7" gracePeriod=600 Dec 10 00:54:20 crc kubenswrapper[4884]: I1210 00:54:20.819337 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-5a59-account-create-update-5pspx"] Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.268960 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.340470 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11394f81-f238-4929-8c40-4c279b8b39dc-operator-scripts\") pod \"11394f81-f238-4929-8c40-4c279b8b39dc\" (UID: \"11394f81-f238-4929-8c40-4c279b8b39dc\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.340592 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nb2qb\" (UniqueName: \"kubernetes.io/projected/11394f81-f238-4929-8c40-4c279b8b39dc-kube-api-access-nb2qb\") pod \"11394f81-f238-4929-8c40-4c279b8b39dc\" (UID: \"11394f81-f238-4929-8c40-4c279b8b39dc\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.342328 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11394f81-f238-4929-8c40-4c279b8b39dc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "11394f81-f238-4929-8c40-4c279b8b39dc" (UID: "11394f81-f238-4929-8c40-4c279b8b39dc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.350326 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11394f81-f238-4929-8c40-4c279b8b39dc-kube-api-access-nb2qb" (OuterVolumeSpecName: "kube-api-access-nb2qb") pod "11394f81-f238-4929-8c40-4c279b8b39dc" (UID: "11394f81-f238-4929-8c40-4c279b8b39dc"). InnerVolumeSpecName "kube-api-access-nb2qb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.444204 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11394f81-f238-4929-8c40-4c279b8b39dc-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.444242 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nb2qb\" (UniqueName: \"kubernetes.io/projected/11394f81-f238-4929-8c40-4c279b8b39dc-kube-api-access-nb2qb\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.510496 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.519364 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.548261 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-combined-ca-bundle\") pod \"e96bb397-11f0-4e24-aafc-9f399d5846b8\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.548296 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-dispersionconf\") pod \"e96bb397-11f0-4e24-aafc-9f399d5846b8\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.548355 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e96bb397-11f0-4e24-aafc-9f399d5846b8-etc-swift\") pod \"e96bb397-11f0-4e24-aafc-9f399d5846b8\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.548458 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e96bb397-11f0-4e24-aafc-9f399d5846b8-scripts\") pod \"e96bb397-11f0-4e24-aafc-9f399d5846b8\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.548488 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3-operator-scripts\") pod \"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3\" (UID: \"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.548562 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcp4g\" (UniqueName: \"kubernetes.io/projected/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3-kube-api-access-fcp4g\") pod \"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3\" (UID: \"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.548579 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vx6mb\" (UniqueName: \"kubernetes.io/projected/e96bb397-11f0-4e24-aafc-9f399d5846b8-kube-api-access-vx6mb\") pod \"e96bb397-11f0-4e24-aafc-9f399d5846b8\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.548607 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-swiftconf\") pod \"e96bb397-11f0-4e24-aafc-9f399d5846b8\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.548631 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e96bb397-11f0-4e24-aafc-9f399d5846b8-ring-data-devices\") pod \"e96bb397-11f0-4e24-aafc-9f399d5846b8\" (UID: \"e96bb397-11f0-4e24-aafc-9f399d5846b8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.549347 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e96bb397-11f0-4e24-aafc-9f399d5846b8-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod 
"e96bb397-11f0-4e24-aafc-9f399d5846b8" (UID: "e96bb397-11f0-4e24-aafc-9f399d5846b8"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.549954 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e96bb397-11f0-4e24-aafc-9f399d5846b8-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "e96bb397-11f0-4e24-aafc-9f399d5846b8" (UID: "e96bb397-11f0-4e24-aafc-9f399d5846b8"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.557656 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "e96bb397-11f0-4e24-aafc-9f399d5846b8" (UID: "e96bb397-11f0-4e24-aafc-9f399d5846b8"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.561474 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3" (UID: "1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.562270 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3-kube-api-access-fcp4g" (OuterVolumeSpecName: "kube-api-access-fcp4g") pod "1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3" (UID: "1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3"). InnerVolumeSpecName "kube-api-access-fcp4g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.564747 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e96bb397-11f0-4e24-aafc-9f399d5846b8-kube-api-access-vx6mb" (OuterVolumeSpecName: "kube-api-access-vx6mb") pod "e96bb397-11f0-4e24-aafc-9f399d5846b8" (UID: "e96bb397-11f0-4e24-aafc-9f399d5846b8"). InnerVolumeSpecName "kube-api-access-vx6mb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.577519 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e96bb397-11f0-4e24-aafc-9f399d5846b8-scripts" (OuterVolumeSpecName: "scripts") pod "e96bb397-11f0-4e24-aafc-9f399d5846b8" (UID: "e96bb397-11f0-4e24-aafc-9f399d5846b8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.581635 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e96bb397-11f0-4e24-aafc-9f399d5846b8" (UID: "e96bb397-11f0-4e24-aafc-9f399d5846b8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.593327 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-m54sx" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.594847 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "e96bb397-11f0-4e24-aafc-9f399d5846b8" (UID: "e96bb397-11f0-4e24-aafc-9f399d5846b8"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.651796 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcp4g\" (UniqueName: \"kubernetes.io/projected/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3-kube-api-access-fcp4g\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.651838 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vx6mb\" (UniqueName: \"kubernetes.io/projected/e96bb397-11f0-4e24-aafc-9f399d5846b8-kube-api-access-vx6mb\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.651856 4884 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-swiftconf\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.651866 4884 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/e96bb397-11f0-4e24-aafc-9f399d5846b8-ring-data-devices\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.651875 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.651883 4884 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/e96bb397-11f0-4e24-aafc-9f399d5846b8-dispersionconf\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.651892 4884 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/e96bb397-11f0-4e24-aafc-9f399d5846b8-etc-swift\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.651900 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e96bb397-11f0-4e24-aafc-9f399d5846b8-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.651907 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.719392 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.746719 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" event={"ID":"11394f81-f238-4929-8c40-4c279b8b39dc","Type":"ContainerDied","Data":"6f69d8f8916a8b2f45941de90653fa7341a5d88ff48d9fd125939bcd642f7584"} Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.746757 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f69d8f8916a8b2f45941de90653fa7341a5d88ff48d9fd125939bcd642f7584" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.746822 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-db-create-pgk6x" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.753149 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-config\") pod \"23f4050b-2338-4211-9ff1-0997060904f8\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.753199 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-web-config\") pod \"23f4050b-2338-4211-9ff1-0997060904f8\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.753262 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-thanos-prometheus-http-client-file\") pod \"23f4050b-2338-4211-9ff1-0997060904f8\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.753309 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/23f4050b-2338-4211-9ff1-0997060904f8-prometheus-metric-storage-rulefiles-0\") pod \"23f4050b-2338-4211-9ff1-0997060904f8\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.753384 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/23f4050b-2338-4211-9ff1-0997060904f8-config-out\") pod \"23f4050b-2338-4211-9ff1-0997060904f8\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.753487 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-npxg7\" (UniqueName: \"kubernetes.io/projected/23f4050b-2338-4211-9ff1-0997060904f8-kube-api-access-npxg7\") pod \"23f4050b-2338-4211-9ff1-0997060904f8\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.753545 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"23f4050b-2338-4211-9ff1-0997060904f8\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.753646 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: 
\"kubernetes.io/projected/23f4050b-2338-4211-9ff1-0997060904f8-tls-assets\") pod \"23f4050b-2338-4211-9ff1-0997060904f8\" (UID: \"23f4050b-2338-4211-9ff1-0997060904f8\") " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.756928 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23f4050b-2338-4211-9ff1-0997060904f8-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "23f4050b-2338-4211-9ff1-0997060904f8" (UID: "23f4050b-2338-4211-9ff1-0997060904f8"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.757295 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-t7f78" event={"ID":"e96bb397-11f0-4e24-aafc-9f399d5846b8","Type":"ContainerDied","Data":"14c840a51ebecf098163fb4c71b025e5de74ce2c3031c93feb23a7b23c82ed7b"} Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.757335 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14c840a51ebecf098163fb4c71b025e5de74ce2c3031c93feb23a7b23c82ed7b" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.757391 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-t7f78" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.759807 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23f4050b-2338-4211-9ff1-0997060904f8-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "23f4050b-2338-4211-9ff1-0997060904f8" (UID: "23f4050b-2338-4211-9ff1-0997060904f8"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.764222 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-config" (OuterVolumeSpecName: "config") pod "23f4050b-2338-4211-9ff1-0997060904f8" (UID: "23f4050b-2338-4211-9ff1-0997060904f8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.764280 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23f4050b-2338-4211-9ff1-0997060904f8-config-out" (OuterVolumeSpecName: "config-out") pod "23f4050b-2338-4211-9ff1-0997060904f8" (UID: "23f4050b-2338-4211-9ff1-0997060904f8"). InnerVolumeSpecName "config-out". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.764636 4884 generic.go:334] "Generic (PLEG): container finished" podID="23f4050b-2338-4211-9ff1-0997060904f8" containerID="a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7" exitCode=0 Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.764656 4884 generic.go:334] "Generic (PLEG): container finished" podID="23f4050b-2338-4211-9ff1-0997060904f8" containerID="46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c" exitCode=0 Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.764663 4884 generic.go:334] "Generic (PLEG): container finished" podID="23f4050b-2338-4211-9ff1-0997060904f8" containerID="002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5" exitCode=0 Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.764701 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"23f4050b-2338-4211-9ff1-0997060904f8","Type":"ContainerDied","Data":"a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7"} Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.764727 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"23f4050b-2338-4211-9ff1-0997060904f8","Type":"ContainerDied","Data":"46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c"} Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.764737 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"23f4050b-2338-4211-9ff1-0997060904f8","Type":"ContainerDied","Data":"002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5"} Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.764749 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"23f4050b-2338-4211-9ff1-0997060904f8","Type":"ContainerDied","Data":"c035caf280c43e8a4da86390fa801ad96d2aa4c2b4f85572d2010554f11db6a4"} Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.764763 4884 scope.go:117] "RemoveContainer" containerID="a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.764899 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.767876 4884 generic.go:334] "Generic (PLEG): container finished" podID="4cadd997-aad1-45be-9922-6e2805437098" containerID="a0bae0e726bc3425819cfdb9bf38ef16c16cf8c22649d5a42a8934d76ad04c59" exitCode=0 Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.767937 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5a59-account-create-update-5pspx" event={"ID":"4cadd997-aad1-45be-9922-6e2805437098","Type":"ContainerDied","Data":"a0bae0e726bc3425819cfdb9bf38ef16c16cf8c22649d5a42a8934d76ad04c59"} Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.767967 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5a59-account-create-update-5pspx" event={"ID":"4cadd997-aad1-45be-9922-6e2805437098","Type":"ContainerStarted","Data":"543c3bbb8262693884ad0c330c65ce04d83dfb664a9990f1fce62770bb1a90a1"} Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.769953 4884 generic.go:334] "Generic (PLEG): container finished" podID="624c66a0-e094-4c3e-94f8-efe258817479" containerID="8d7cf833cb3bedf29d290fc40d21ba80e92a975043e5618c95c9a644ee88a057" exitCode=0 Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.770027 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-sr25b" event={"ID":"624c66a0-e094-4c3e-94f8-efe258817479","Type":"ContainerDied","Data":"8d7cf833cb3bedf29d290fc40d21ba80e92a975043e5618c95c9a644ee88a057"} Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.771013 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "23f4050b-2338-4211-9ff1-0997060904f8" (UID: "23f4050b-2338-4211-9ff1-0997060904f8"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.771765 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" event={"ID":"1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3","Type":"ContainerDied","Data":"bc02b80c85d8bdf0b3f4ad42c2b70b168978df7bdb81d76ca2db9164b4caaf16"} Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.771796 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc02b80c85d8bdf0b3f4ad42c2b70b168978df7bdb81d76ca2db9164b4caaf16" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.771832 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-07ac-account-create-update-vs7v2" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.785218 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23f4050b-2338-4211-9ff1-0997060904f8-kube-api-access-npxg7" (OuterVolumeSpecName: "kube-api-access-npxg7") pod "23f4050b-2338-4211-9ff1-0997060904f8" (UID: "23f4050b-2338-4211-9ff1-0997060904f8"). InnerVolumeSpecName "kube-api-access-npxg7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.791111 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "23f4050b-2338-4211-9ff1-0997060904f8" (UID: "23f4050b-2338-4211-9ff1-0997060904f8"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.792343 4884 scope.go:117] "RemoveContainer" containerID="46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.811751 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-web-config" (OuterVolumeSpecName: "web-config") pod "23f4050b-2338-4211-9ff1-0997060904f8" (UID: "23f4050b-2338-4211-9ff1-0997060904f8"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.813501 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-577tv-config-qszf8"] Dec 10 00:54:21 crc kubenswrapper[4884]: E1210 00:54:21.813831 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="config-reloader" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.813847 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="config-reloader" Dec 10 00:54:21 crc kubenswrapper[4884]: E1210 00:54:21.813859 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11394f81-f238-4929-8c40-4c279b8b39dc" containerName="mariadb-database-create" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.813865 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="11394f81-f238-4929-8c40-4c279b8b39dc" containerName="mariadb-database-create" Dec 10 00:54:21 crc kubenswrapper[4884]: E1210 00:54:21.813880 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="thanos-sidecar" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.813886 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="thanos-sidecar" Dec 10 00:54:21 crc kubenswrapper[4884]: E1210 00:54:21.813894 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3" containerName="mariadb-account-create-update" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.813900 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3" containerName="mariadb-account-create-update" Dec 10 00:54:21 crc kubenswrapper[4884]: E1210 00:54:21.813910 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e96bb397-11f0-4e24-aafc-9f399d5846b8" containerName="swift-ring-rebalance" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.813915 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e96bb397-11f0-4e24-aafc-9f399d5846b8" containerName="swift-ring-rebalance" Dec 10 00:54:21 crc kubenswrapper[4884]: E1210 00:54:21.813935 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="init-config-reloader" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 
00:54:21.813942 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="init-config-reloader" Dec 10 00:54:21 crc kubenswrapper[4884]: E1210 00:54:21.813953 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="prometheus" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.813959 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="prometheus" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.814131 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="config-reloader" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.814143 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="11394f81-f238-4929-8c40-4c279b8b39dc" containerName="mariadb-database-create" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.814153 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="prometheus" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.814168 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e96bb397-11f0-4e24-aafc-9f399d5846b8" containerName="swift-ring-rebalance" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.814180 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3" containerName="mariadb-account-create-update" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.814190 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="23f4050b-2338-4211-9ff1-0997060904f8" containerName="thanos-sidecar" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.815195 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-577tv-config-qszf8" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.823387 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.845052 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-577tv-config-qszf8"] Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865013 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b13f3d75-9027-465c-bbc4-5195fa3f24e7-additional-scripts\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865056 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-run-ovn\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865105 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-log-ovn\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865163 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mn9hf\" (UniqueName: \"kubernetes.io/projected/b13f3d75-9027-465c-bbc4-5195fa3f24e7-kube-api-access-mn9hf\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865194 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-run\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865240 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b13f3d75-9027-465c-bbc4-5195fa3f24e7-scripts\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865301 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865314 4884 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/23f4050b-2338-4211-9ff1-0997060904f8-tls-assets\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865324 4884 reconciler_common.go:293] "Volume detached for volume \"config\" 
(UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865333 4884 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-web-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865343 4884 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/23f4050b-2338-4211-9ff1-0997060904f8-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865353 4884 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/23f4050b-2338-4211-9ff1-0997060904f8-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865363 4884 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/23f4050b-2338-4211-9ff1-0997060904f8-config-out\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.865373 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-npxg7\" (UniqueName: \"kubernetes.io/projected/23f4050b-2338-4211-9ff1-0997060904f8-kube-api-access-npxg7\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.879731 4884 scope.go:117] "RemoveContainer" containerID="002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.890527 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.902568 4884 scope.go:117] "RemoveContainer" containerID="bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.921497 4884 scope.go:117] "RemoveContainer" containerID="a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7" Dec 10 00:54:21 crc kubenswrapper[4884]: E1210 00:54:21.922826 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7\": container with ID starting with a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7 not found: ID does not exist" containerID="a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.922875 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7"} err="failed to get container status \"a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7\": rpc error: code = NotFound desc = could not find container \"a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7\": container with ID starting with a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7 not found: ID does not exist" Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.922903 4884 scope.go:117] "RemoveContainer" containerID="46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c" Dec 10 00:54:21 
crc kubenswrapper[4884]: E1210 00:54:21.923291 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c\": container with ID starting with 46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c not found: ID does not exist" containerID="46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.923403 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c"} err="failed to get container status \"46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c\": rpc error: code = NotFound desc = could not find container \"46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c\": container with ID starting with 46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c not found: ID does not exist"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.923540 4884 scope.go:117] "RemoveContainer" containerID="002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5"
Dec 10 00:54:21 crc kubenswrapper[4884]: E1210 00:54:21.923878 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5\": container with ID starting with 002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5 not found: ID does not exist" containerID="002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.923904 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5"} err="failed to get container status \"002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5\": rpc error: code = NotFound desc = could not find container \"002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5\": container with ID starting with 002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5 not found: ID does not exist"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.923919 4884 scope.go:117] "RemoveContainer" containerID="bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7"
Dec 10 00:54:21 crc kubenswrapper[4884]: E1210 00:54:21.924237 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7\": container with ID starting with bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7 not found: ID does not exist" containerID="bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.924270 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7"} err="failed to get container status \"bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7\": rpc error: code = NotFound desc = could not find container \"bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7\": container with ID starting with bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7 not found: ID does not exist"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.924299 4884 scope.go:117] "RemoveContainer" containerID="a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.924564 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7"} err="failed to get container status \"a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7\": rpc error: code = NotFound desc = could not find container \"a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7\": container with ID starting with a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7 not found: ID does not exist"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.924617 4884 scope.go:117] "RemoveContainer" containerID="46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.924806 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c"} err="failed to get container status \"46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c\": rpc error: code = NotFound desc = could not find container \"46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c\": container with ID starting with 46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c not found: ID does not exist"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.924830 4884 scope.go:117] "RemoveContainer" containerID="002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.925221 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5"} err="failed to get container status \"002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5\": rpc error: code = NotFound desc = could not find container \"002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5\": container with ID starting with 002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5 not found: ID does not exist"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.925246 4884 scope.go:117] "RemoveContainer" containerID="bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.925482 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7"} err="failed to get container status \"bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7\": rpc error: code = NotFound desc = could not find container \"bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7\": container with ID starting with bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7 not found: ID does not exist"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.925504 4884 scope.go:117] "RemoveContainer" containerID="a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.925742 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7"} err="failed to get container status \"a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7\": rpc error: code = NotFound desc = could not find container \"a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7\": container with ID starting with a1dbb62e78e2bf12e642a8cef331aa623c25a9f296f0788d6c7a6e9c7c695ab7 not found: ID does not exist"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.925775 4884 scope.go:117] "RemoveContainer" containerID="46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.925964 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c"} err="failed to get container status \"46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c\": rpc error: code = NotFound desc = could not find container \"46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c\": container with ID starting with 46cf8204ef01791e4253212d79c62fc4707f750d9c7798bb3d53e07d1e770a5c not found: ID does not exist"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.925989 4884 scope.go:117] "RemoveContainer" containerID="002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.926204 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5"} err="failed to get container status \"002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5\": rpc error: code = NotFound desc = could not find container \"002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5\": container with ID starting with 002f0f2808cc910f71dd68ede9d01a463ba77d87021b5f9d15249cfdd6a95eb5 not found: ID does not exist"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.926226 4884 scope.go:117] "RemoveContainer" containerID="bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.926399 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7"} err="failed to get container status \"bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7\": rpc error: code = NotFound desc = could not find container \"bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7\": container with ID starting with bf75d5b438dd38c3fae6c535d1df81689e4d0dcf36a3bafda916eb8c830f42c7 not found: ID does not exist"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.967569 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mn9hf\" (UniqueName: \"kubernetes.io/projected/b13f3d75-9027-465c-bbc4-5195fa3f24e7-kube-api-access-mn9hf\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.967635 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-run\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.967684 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b13f3d75-9027-465c-bbc4-5195fa3f24e7-scripts\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.967769 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b13f3d75-9027-465c-bbc4-5195fa3f24e7-additional-scripts\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.967793 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-run-ovn\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.968072 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-run\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.968074 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-run-ovn\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.968504 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b13f3d75-9027-465c-bbc4-5195fa3f24e7-additional-scripts\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.969539 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b13f3d75-9027-465c-bbc4-5195fa3f24e7-scripts\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.969694 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-log-ovn\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.969792 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-log-ovn\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.969866 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:21 crc kubenswrapper[4884]: I1210 00:54:21.982934 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mn9hf\" (UniqueName: \"kubernetes.io/projected/b13f3d75-9027-465c-bbc4-5195fa3f24e7-kube-api-access-mn9hf\") pod \"ovn-controller-577tv-config-qszf8\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") " pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.138739 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.148232 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.171091 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.173696 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.175451 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.175536 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-jjh9h"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.176179 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.176313 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.176464 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.176723 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.192346 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.201054 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.202241 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.275148 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.275211 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/9879cc81-0cad-4e77-90e5-46afd9adb241-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.275275 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/9879cc81-0cad-4e77-90e5-46afd9adb241-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.275316 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.275345 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.275369 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/9879cc81-0cad-4e77-90e5-46afd9adb241-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.275595 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-config\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.275663 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.275719 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfzxz\" (UniqueName: \"kubernetes.io/projected/9879cc81-0cad-4e77-90e5-46afd9adb241-kube-api-access-bfzxz\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.275741 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.275783 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.377215 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.377549 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/9879cc81-0cad-4e77-90e5-46afd9adb241-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.377575 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/9879cc81-0cad-4e77-90e5-46afd9adb241-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.377603 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.377622 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.377641 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/9879cc81-0cad-4e77-90e5-46afd9adb241-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.377701 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-config\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.377727 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.377753 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.377772 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfzxz\" (UniqueName: \"kubernetes.io/projected/9879cc81-0cad-4e77-90e5-46afd9adb241-kube-api-access-bfzxz\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.377800 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.378047 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.378591 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/9879cc81-0cad-4e77-90e5-46afd9adb241-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.383966 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/9879cc81-0cad-4e77-90e5-46afd9adb241-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.384802 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.385385 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.387088 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.387148 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-config\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.388012 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.396019 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/9879cc81-0cad-4e77-90e5-46afd9adb241-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.396671 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/9879cc81-0cad-4e77-90e5-46afd9adb241-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.405468 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfzxz\" (UniqueName: \"kubernetes.io/projected/9879cc81-0cad-4e77-90e5-46afd9adb241-kube-api-access-bfzxz\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.432739 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"prometheus-metric-storage-0\" (UID: \"9879cc81-0cad-4e77-90e5-46afd9adb241\") " pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.490624 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.586696 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-577tv-config-qszf8"]
Dec 10 00:54:22 crc kubenswrapper[4884]: I1210 00:54:22.794644 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-577tv-config-qszf8" event={"ID":"b13f3d75-9027-465c-bbc4-5195fa3f24e7","Type":"ContainerStarted","Data":"da9bd92743f6c494648019277f66cc4deefe69baf5fdbf24bae2703534766cc7"}
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.143055 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.307575 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23f4050b-2338-4211-9ff1-0997060904f8" path="/var/lib/kubelet/pods/23f4050b-2338-4211-9ff1-0997060904f8/volumes"
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.331484 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-sr25b"
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.336517 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5a59-account-create-update-5pspx"
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.400447 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4cadd997-aad1-45be-9922-6e2805437098-operator-scripts\") pod \"4cadd997-aad1-45be-9922-6e2805437098\" (UID: \"4cadd997-aad1-45be-9922-6e2805437098\") "
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.400777 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8h6v\" (UniqueName: \"kubernetes.io/projected/624c66a0-e094-4c3e-94f8-efe258817479-kube-api-access-j8h6v\") pod \"624c66a0-e094-4c3e-94f8-efe258817479\" (UID: \"624c66a0-e094-4c3e-94f8-efe258817479\") "
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.400862 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tkkq\" (UniqueName: \"kubernetes.io/projected/4cadd997-aad1-45be-9922-6e2805437098-kube-api-access-9tkkq\") pod \"4cadd997-aad1-45be-9922-6e2805437098\" (UID: \"4cadd997-aad1-45be-9922-6e2805437098\") "
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.400941 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624c66a0-e094-4c3e-94f8-efe258817479-operator-scripts\") pod \"624c66a0-e094-4c3e-94f8-efe258817479\" (UID: \"624c66a0-e094-4c3e-94f8-efe258817479\") "
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.401954 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cadd997-aad1-45be-9922-6e2805437098-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4cadd997-aad1-45be-9922-6e2805437098" (UID: "4cadd997-aad1-45be-9922-6e2805437098"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.402243 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/624c66a0-e094-4c3e-94f8-efe258817479-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "624c66a0-e094-4c3e-94f8-efe258817479" (UID: "624c66a0-e094-4c3e-94f8-efe258817479"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.405322 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/624c66a0-e094-4c3e-94f8-efe258817479-kube-api-access-j8h6v" (OuterVolumeSpecName: "kube-api-access-j8h6v") pod "624c66a0-e094-4c3e-94f8-efe258817479" (UID: "624c66a0-e094-4c3e-94f8-efe258817479"). InnerVolumeSpecName "kube-api-access-j8h6v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.405734 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cadd997-aad1-45be-9922-6e2805437098-kube-api-access-9tkkq" (OuterVolumeSpecName: "kube-api-access-9tkkq") pod "4cadd997-aad1-45be-9922-6e2805437098" (UID: "4cadd997-aad1-45be-9922-6e2805437098"). InnerVolumeSpecName "kube-api-access-9tkkq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.502645 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tkkq\" (UniqueName: \"kubernetes.io/projected/4cadd997-aad1-45be-9922-6e2805437098-kube-api-access-9tkkq\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.502692 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/624c66a0-e094-4c3e-94f8-efe258817479-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.502705 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4cadd997-aad1-45be-9922-6e2805437098-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.502716 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8h6v\" (UniqueName: \"kubernetes.io/projected/624c66a0-e094-4c3e-94f8-efe258817479-kube-api-access-j8h6v\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.815136 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5a59-account-create-update-5pspx" event={"ID":"4cadd997-aad1-45be-9922-6e2805437098","Type":"ContainerDied","Data":"543c3bbb8262693884ad0c330c65ce04d83dfb664a9990f1fce62770bb1a90a1"}
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.815175 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="543c3bbb8262693884ad0c330c65ce04d83dfb664a9990f1fce62770bb1a90a1"
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.815203 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5a59-account-create-update-5pspx"
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.816611 4884 generic.go:334] "Generic (PLEG): container finished" podID="b13f3d75-9027-465c-bbc4-5195fa3f24e7" containerID="ca6b9c1be04a3c343b8791a5d25231a9ffbb990934461979bc2e463f071ad1f0" exitCode=0
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.816690 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-577tv-config-qszf8" event={"ID":"b13f3d75-9027-465c-bbc4-5195fa3f24e7","Type":"ContainerDied","Data":"ca6b9c1be04a3c343b8791a5d25231a9ffbb990934461979bc2e463f071ad1f0"}
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.818996 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-sr25b" event={"ID":"624c66a0-e094-4c3e-94f8-efe258817479","Type":"ContainerDied","Data":"d1405c15401b0b575a880d7918c84b131536b784cbd2cd28559bf6bd032d9262"}
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.819048 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d1405c15401b0b575a880d7918c84b131536b784cbd2cd28559bf6bd032d9262"
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.819053 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-sr25b"
Dec 10 00:54:23 crc kubenswrapper[4884]: I1210 00:54:23.820155 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9879cc81-0cad-4e77-90e5-46afd9adb241","Type":"ContainerStarted","Data":"dc5e02094f235e8c43fef46036649fb2e40313612acda074a478115e68a6bbd2"}
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.274961 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-x6mx8"]
Dec 10 00:54:25 crc kubenswrapper[4884]: E1210 00:54:24.275477 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="624c66a0-e094-4c3e-94f8-efe258817479" containerName="mariadb-database-create"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.275494 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="624c66a0-e094-4c3e-94f8-efe258817479" containerName="mariadb-database-create"
Dec 10 00:54:25 crc kubenswrapper[4884]: E1210 00:54:24.275531 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cadd997-aad1-45be-9922-6e2805437098" containerName="mariadb-account-create-update"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.275540 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cadd997-aad1-45be-9922-6e2805437098" containerName="mariadb-account-create-update"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.275759 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cadd997-aad1-45be-9922-6e2805437098" containerName="mariadb-account-create-update"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.275792 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="624c66a0-e094-4c3e-94f8-efe258817479" containerName="mariadb-database-create"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.276658 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-x6mx8"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.291542 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-x6mx8"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.316801 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f111e4f-54f4-4964-8ccf-d6336884eeec-operator-scripts\") pod \"keystone-db-create-x6mx8\" (UID: \"7f111e4f-54f4-4964-8ccf-d6336884eeec\") " pod="openstack/keystone-db-create-x6mx8"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.316855 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pd55p\" (UniqueName: \"kubernetes.io/projected/7f111e4f-54f4-4964-8ccf-d6336884eeec-kube-api-access-pd55p\") pod \"keystone-db-create-x6mx8\" (UID: \"7f111e4f-54f4-4964-8ccf-d6336884eeec\") " pod="openstack/keystone-db-create-x6mx8"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.341258 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-9058-account-create-update-sfww4"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.342408 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-9058-account-create-update-sfww4"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.347820 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.356166 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-9058-account-create-update-sfww4"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.419146 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b6ae20d-aec5-445c-82be-75954a52176b-operator-scripts\") pod \"keystone-9058-account-create-update-sfww4\" (UID: \"4b6ae20d-aec5-445c-82be-75954a52176b\") " pod="openstack/keystone-9058-account-create-update-sfww4"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.419542 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f111e4f-54f4-4964-8ccf-d6336884eeec-operator-scripts\") pod \"keystone-db-create-x6mx8\" (UID: \"7f111e4f-54f4-4964-8ccf-d6336884eeec\") " pod="openstack/keystone-db-create-x6mx8"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.419590 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pd55p\" (UniqueName: \"kubernetes.io/projected/7f111e4f-54f4-4964-8ccf-d6336884eeec-kube-api-access-pd55p\") pod \"keystone-db-create-x6mx8\" (UID: \"7f111e4f-54f4-4964-8ccf-d6336884eeec\") " pod="openstack/keystone-db-create-x6mx8"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.419647 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4gwh\" (UniqueName: \"kubernetes.io/projected/4b6ae20d-aec5-445c-82be-75954a52176b-kube-api-access-w4gwh\") pod \"keystone-9058-account-create-update-sfww4\" (UID: \"4b6ae20d-aec5-445c-82be-75954a52176b\") " pod="openstack/keystone-9058-account-create-update-sfww4"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.420376 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f111e4f-54f4-4964-8ccf-d6336884eeec-operator-scripts\") pod \"keystone-db-create-x6mx8\" (UID: \"7f111e4f-54f4-4964-8ccf-d6336884eeec\") " pod="openstack/keystone-db-create-x6mx8"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.448090 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pd55p\" (UniqueName: \"kubernetes.io/projected/7f111e4f-54f4-4964-8ccf-d6336884eeec-kube-api-access-pd55p\") pod \"keystone-db-create-x6mx8\" (UID: \"7f111e4f-54f4-4964-8ccf-d6336884eeec\") " pod="openstack/keystone-db-create-x6mx8"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.521189 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b6ae20d-aec5-445c-82be-75954a52176b-operator-scripts\") pod \"keystone-9058-account-create-update-sfww4\" (UID: \"4b6ae20d-aec5-445c-82be-75954a52176b\") " pod="openstack/keystone-9058-account-create-update-sfww4"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.521270 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4gwh\" (UniqueName: \"kubernetes.io/projected/4b6ae20d-aec5-445c-82be-75954a52176b-kube-api-access-w4gwh\") pod \"keystone-9058-account-create-update-sfww4\" (UID: \"4b6ae20d-aec5-445c-82be-75954a52176b\") " pod="openstack/keystone-9058-account-create-update-sfww4"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.522002 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b6ae20d-aec5-445c-82be-75954a52176b-operator-scripts\") pod \"keystone-9058-account-create-update-sfww4\" (UID: \"4b6ae20d-aec5-445c-82be-75954a52176b\") " pod="openstack/keystone-9058-account-create-update-sfww4"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.551068 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4gwh\" (UniqueName: \"kubernetes.io/projected/4b6ae20d-aec5-445c-82be-75954a52176b-kube-api-access-w4gwh\") pod \"keystone-9058-account-create-update-sfww4\" (UID: \"4b6ae20d-aec5-445c-82be-75954a52176b\") " pod="openstack/keystone-9058-account-create-update-sfww4"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.557788 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-sz9lb"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.559316 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-sz9lb"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.566654 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-sz9lb"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.616657 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-x6mx8"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.627560 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-ad48-account-create-update-rdb85"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.628760 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-ad48-account-create-update-rdb85"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.628780 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6msll\" (UniqueName: \"kubernetes.io/projected/6df655e4-396f-4fc7-8b31-ae3ee55134f2-kube-api-access-6msll\") pod \"placement-db-create-sz9lb\" (UID: \"6df655e4-396f-4fc7-8b31-ae3ee55134f2\") " pod="openstack/placement-db-create-sz9lb"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.628903 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6df655e4-396f-4fc7-8b31-ae3ee55134f2-operator-scripts\") pod \"placement-db-create-sz9lb\" (UID: \"6df655e4-396f-4fc7-8b31-ae3ee55134f2\") " pod="openstack/placement-db-create-sz9lb"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.631860 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.664778 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-9058-account-create-update-sfww4"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.670195 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-ad48-account-create-update-rdb85"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.731788 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75856\" (UniqueName: \"kubernetes.io/projected/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a-kube-api-access-75856\") pod \"placement-ad48-account-create-update-rdb85\" (UID: \"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a\") " pod="openstack/placement-ad48-account-create-update-rdb85"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.731868 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6msll\" (UniqueName: \"kubernetes.io/projected/6df655e4-396f-4fc7-8b31-ae3ee55134f2-kube-api-access-6msll\") pod \"placement-db-create-sz9lb\" (UID: \"6df655e4-396f-4fc7-8b31-ae3ee55134f2\") " pod="openstack/placement-db-create-sz9lb"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.731970 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6df655e4-396f-4fc7-8b31-ae3ee55134f2-operator-scripts\") pod \"placement-db-create-sz9lb\" (UID: \"6df655e4-396f-4fc7-8b31-ae3ee55134f2\") " pod="openstack/placement-db-create-sz9lb"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.732001 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a-operator-scripts\") pod \"placement-ad48-account-create-update-rdb85\" (UID: \"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a\") " pod="openstack/placement-ad48-account-create-update-rdb85"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.732875 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6df655e4-396f-4fc7-8b31-ae3ee55134f2-operator-scripts\") pod \"placement-db-create-sz9lb\" (UID: \"6df655e4-396f-4fc7-8b31-ae3ee55134f2\") " pod="openstack/placement-db-create-sz9lb"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.752090 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6msll\" (UniqueName: \"kubernetes.io/projected/6df655e4-396f-4fc7-8b31-ae3ee55134f2-kube-api-access-6msll\") pod \"placement-db-create-sz9lb\" (UID: \"6df655e4-396f-4fc7-8b31-ae3ee55134f2\") " pod="openstack/placement-db-create-sz9lb"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.833821 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75856\" (UniqueName: \"kubernetes.io/projected/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a-kube-api-access-75856\") pod \"placement-ad48-account-create-update-rdb85\" (UID: \"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a\") " pod="openstack/placement-ad48-account-create-update-rdb85"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.833971 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a-operator-scripts\") pod \"placement-ad48-account-create-update-rdb85\" (UID: \"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a\") " pod="openstack/placement-ad48-account-create-update-rdb85"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.835374 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a-operator-scripts\") pod \"placement-ad48-account-create-update-rdb85\" (UID: \"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a\") " pod="openstack/placement-ad48-account-create-update-rdb85"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.856715 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75856\" (UniqueName: \"kubernetes.io/projected/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a-kube-api-access-75856\") pod \"placement-ad48-account-create-update-rdb85\" (UID: \"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a\") " pod="openstack/placement-ad48-account-create-update-rdb85"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:24.928590 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-sz9lb"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.019481 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-ad48-account-create-update-rdb85"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.265663 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-bd4dd"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.270041 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.273771 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-k7tsc"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.290242 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.337635 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-bd4dd"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.452204 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-combined-ca-bundle\") pod \"glance-db-sync-bd4dd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.452291 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-config-data\") pod \"glance-db-sync-bd4dd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.452360 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-db-sync-config-data\") pod \"glance-db-sync-bd4dd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.452397 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtcgj\" (UniqueName: \"kubernetes.io/projected/db50dd06-d67c-468e-88de-6a8fb86bd1bd-kube-api-access-gtcgj\") pod \"glance-db-sync-bd4dd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.554032 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-config-data\") pod \"glance-db-sync-bd4dd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.554128 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-db-sync-config-data\") pod \"glance-db-sync-bd4dd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.554165 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtcgj\" (UniqueName: \"kubernetes.io/projected/db50dd06-d67c-468e-88de-6a8fb86bd1bd-kube-api-access-gtcgj\") pod \"glance-db-sync-bd4dd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.554204 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-combined-ca-bundle\") pod \"glance-db-sync-bd4dd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.559826 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-combined-ca-bundle\") pod \"glance-db-sync-bd4dd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.561331 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-config-data\") pod \"glance-db-sync-bd4dd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.563145 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-db-sync-config-data\") pod \"glance-db-sync-bd4dd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.586215 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-x6mx8"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.624488 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-9058-account-create-update-sfww4"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.672158 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtcgj\" (UniqueName: \"kubernetes.io/projected/db50dd06-d67c-468e-88de-6a8fb86bd1bd-kube-api-access-gtcgj\") pod \"glance-db-sync-bd4dd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.812675 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-sz9lb"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.821991 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-ad48-account-create-update-rdb85"]
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.837883 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-577tv-config-qszf8" event={"ID":"b13f3d75-9027-465c-bbc4-5195fa3f24e7","Type":"ContainerDied","Data":"da9bd92743f6c494648019277f66cc4deefe69baf5fdbf24bae2703534766cc7"}
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.837929 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da9bd92743f6c494648019277f66cc4deefe69baf5fdbf24bae2703534766cc7"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.839548 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-9058-account-create-update-sfww4" event={"ID":"4b6ae20d-aec5-445c-82be-75954a52176b","Type":"ContainerStarted","Data":"7355487718b73b32e4835a0b6e6e489ad2ddeea408ac9845b3b5811139ccf3a6"}
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.840923 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-x6mx8" event={"ID":"7f111e4f-54f4-4964-8ccf-d6336884eeec","Type":"ContainerStarted","Data":"888427a89d1094a88cd9d0d3354cd84b4fc6535220b85715be3a729244914454"}
Dec 10 00:54:25 crc kubenswrapper[4884]: W1210 00:54:25.890476 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6df655e4_396f_4fc7_8b31_ae3ee55134f2.slice/crio-0194396c22131db01980d9ea427639aa5c1400ed5f74ab3de837a4373becdb0d WatchSource:0}: Error finding container 0194396c22131db01980d9ea427639aa5c1400ed5f74ab3de837a4373becdb0d: Status 404 returned error can't find the container with id 0194396c22131db01980d9ea427639aa5c1400ed5f74ab3de837a4373becdb0d
Dec 10 00:54:25 crc kubenswrapper[4884]: W1210 00:54:25.892109 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb6e587fb_bea6_4152_8dbe_b8aa7c6e203a.slice/crio-b420b5d3ba7fabb573c0a7f7bd10af14f7c091192de801fd84583ce7513b5543 WatchSource:0}: Error finding container b420b5d3ba7fabb573c0a7f7bd10af14f7c091192de801fd84583ce7513b5543: Status 404 returned error can't find the container with id b420b5d3ba7fabb573c0a7f7bd10af14f7c091192de801fd84583ce7513b5543
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.895634 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-577tv-config-qszf8"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.923008 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-bd4dd"
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.961328 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b13f3d75-9027-465c-bbc4-5195fa3f24e7-additional-scripts\") pod \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") "
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.961394 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b13f3d75-9027-465c-bbc4-5195fa3f24e7-scripts\") pod \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") "
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.961466 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-run\") pod \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") "
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.961549 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mn9hf\" (UniqueName: \"kubernetes.io/projected/b13f3d75-9027-465c-bbc4-5195fa3f24e7-kube-api-access-mn9hf\") pod \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") "
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.961607 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-run-ovn\") pod \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") "
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.961702 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-log-ovn\") pod \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\" (UID: \"b13f3d75-9027-465c-bbc4-5195fa3f24e7\") "
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.962079 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-run" (OuterVolumeSpecName: "var-run") pod "b13f3d75-9027-465c-bbc4-5195fa3f24e7" (UID: "b13f3d75-9027-465c-bbc4-5195fa3f24e7"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.962147 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "b13f3d75-9027-465c-bbc4-5195fa3f24e7" (UID: "b13f3d75-9027-465c-bbc4-5195fa3f24e7"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.962440 4884 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-log-ovn\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.962566 4884 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-run\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.963555 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "b13f3d75-9027-465c-bbc4-5195fa3f24e7" (UID: "b13f3d75-9027-465c-bbc4-5195fa3f24e7"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.963796 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b13f3d75-9027-465c-bbc4-5195fa3f24e7-scripts" (OuterVolumeSpecName: "scripts") pod "b13f3d75-9027-465c-bbc4-5195fa3f24e7" (UID: "b13f3d75-9027-465c-bbc4-5195fa3f24e7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.963858 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b13f3d75-9027-465c-bbc4-5195fa3f24e7-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "b13f3d75-9027-465c-bbc4-5195fa3f24e7" (UID: "b13f3d75-9027-465c-bbc4-5195fa3f24e7"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:54:25 crc kubenswrapper[4884]: I1210 00:54:25.967903 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b13f3d75-9027-465c-bbc4-5195fa3f24e7-kube-api-access-mn9hf" (OuterVolumeSpecName: "kube-api-access-mn9hf") pod "b13f3d75-9027-465c-bbc4-5195fa3f24e7" (UID: "b13f3d75-9027-465c-bbc4-5195fa3f24e7"). InnerVolumeSpecName "kube-api-access-mn9hf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.064004 4884 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b13f3d75-9027-465c-bbc4-5195fa3f24e7-additional-scripts\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.064032 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b13f3d75-9027-465c-bbc4-5195fa3f24e7-scripts\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.064042 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mn9hf\" (UniqueName: \"kubernetes.io/projected/b13f3d75-9027-465c-bbc4-5195fa3f24e7-kube-api-access-mn9hf\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.064052 4884 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b13f3d75-9027-465c-bbc4-5195fa3f24e7-var-run-ovn\") on node \"crc\" DevicePath \"\""
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.483205 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-577tv"
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.564555 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-bd4dd"]
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.581260 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.867371 4884 generic.go:334] "Generic (PLEG): container finished" podID="4b6ae20d-aec5-445c-82be-75954a52176b" containerID="c326ce590d92f55184399382e5bc4bef037cb66663e9febd88f0b5c6f2cfbc20" exitCode=0
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.868302 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-9058-account-create-update-sfww4" event={"ID":"4b6ae20d-aec5-445c-82be-75954a52176b","Type":"ContainerDied","Data":"c326ce590d92f55184399382e5bc4bef037cb66663e9febd88f0b5c6f2cfbc20"}
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.872858 4884 generic.go:334] "Generic (PLEG): container finished" podID="6df655e4-396f-4fc7-8b31-ae3ee55134f2" containerID="d35b36d9ab6c1d8fb5e9c440a0e5d7303c2236ea19b4b2151ee72d4286279707" exitCode=0
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.872913 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-sz9lb" event={"ID":"6df655e4-396f-4fc7-8b31-ae3ee55134f2","Type":"ContainerDied","Data":"d35b36d9ab6c1d8fb5e9c440a0e5d7303c2236ea19b4b2151ee72d4286279707"}
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.872928 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-sz9lb" event={"ID":"6df655e4-396f-4fc7-8b31-ae3ee55134f2","Type":"ContainerStarted","Data":"0194396c22131db01980d9ea427639aa5c1400ed5f74ab3de837a4373becdb0d"}
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.874512 4884 generic.go:334] "Generic (PLEG): container finished" podID="7f111e4f-54f4-4964-8ccf-d6336884eeec" containerID="9319b0156366d80f8fd9d46684501f4be1f30bef0ec555307a572178115e10fb" exitCode=0
Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.874547 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-x6mx8"
event={"ID":"7f111e4f-54f4-4964-8ccf-d6336884eeec","Type":"ContainerDied","Data":"9319b0156366d80f8fd9d46684501f4be1f30bef0ec555307a572178115e10fb"} Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.876818 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9879cc81-0cad-4e77-90e5-46afd9adb241","Type":"ContainerStarted","Data":"ff6db0f9b13951824e620b61c2d4f0157276e91bb1bff3a87e9a00affaf2f402"} Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.881681 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bd4dd" event={"ID":"db50dd06-d67c-468e-88de-6a8fb86bd1bd","Type":"ContainerStarted","Data":"ebc911aa90c4189eab34474f53da7da8538de5caf4a8a92f165e8c305d2b3fe0"} Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.883200 4884 generic.go:334] "Generic (PLEG): container finished" podID="b6e587fb-bea6-4152-8dbe-b8aa7c6e203a" containerID="4b196f78d47d05462e74d7d0a4712868a11d6964660de3b9d97da0a7c8e3aefb" exitCode=0 Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.883295 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-577tv-config-qszf8" Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.886308 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-ad48-account-create-update-rdb85" event={"ID":"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a","Type":"ContainerDied","Data":"4b196f78d47d05462e74d7d0a4712868a11d6964660de3b9d97da0a7c8e3aefb"} Dec 10 00:54:26 crc kubenswrapper[4884]: I1210 00:54:26.886362 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-ad48-account-create-update-rdb85" event={"ID":"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a","Type":"ContainerStarted","Data":"b420b5d3ba7fabb573c0a7f7bd10af14f7c091192de801fd84583ce7513b5543"} Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.026650 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-577tv-config-qszf8"] Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.040615 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-577tv-config-qszf8"] Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.119708 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-577tv-config-gnbfs"] Dec 10 00:54:27 crc kubenswrapper[4884]: E1210 00:54:27.120177 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b13f3d75-9027-465c-bbc4-5195fa3f24e7" containerName="ovn-config" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.120192 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b13f3d75-9027-465c-bbc4-5195fa3f24e7" containerName="ovn-config" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.120416 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b13f3d75-9027-465c-bbc4-5195fa3f24e7" containerName="ovn-config" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.133044 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-577tv-config-gnbfs"] Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.133153 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.154319 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.189853 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5"] Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.191127 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.237258 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5"] Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.295128 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-run-ovn\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.295218 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b4091c4b-edf7-435c-9fbd-450e53dd9fed-additional-scripts\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.295243 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-run\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.295515 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9f9f092-f6ef-4316-bc6a-71bdee41cec3-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-jhkd5\" (UID: \"b9f9f092-f6ef-4316-bc6a-71bdee41cec3\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.295590 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b4091c4b-edf7-435c-9fbd-450e53dd9fed-scripts\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.295646 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sjbh\" (UniqueName: \"kubernetes.io/projected/b9f9f092-f6ef-4316-bc6a-71bdee41cec3-kube-api-access-8sjbh\") pod \"mysqld-exporter-openstack-cell1-db-create-jhkd5\" (UID: \"b9f9f092-f6ef-4316-bc6a-71bdee41cec3\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.295742 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-8887w\" (UniqueName: \"kubernetes.io/projected/b4091c4b-edf7-435c-9fbd-450e53dd9fed-kube-api-access-8887w\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.295888 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-log-ovn\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.306342 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b13f3d75-9027-465c-bbc4-5195fa3f24e7" path="/var/lib/kubelet/pods/b13f3d75-9027-465c-bbc4-5195fa3f24e7/volumes" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.386521 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-7751-account-create-update-2bn8l"] Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.388091 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.390708 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-openstack-cell1-db-secret" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.396496 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-7751-account-create-update-2bn8l"] Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.397406 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8887w\" (UniqueName: \"kubernetes.io/projected/b4091c4b-edf7-435c-9fbd-450e53dd9fed-kube-api-access-8887w\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.397529 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-log-ovn\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.397595 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-run-ovn\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.397644 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b4091c4b-edf7-435c-9fbd-450e53dd9fed-additional-scripts\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.397674 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-run\") pod 
\"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.397735 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9f9f092-f6ef-4316-bc6a-71bdee41cec3-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-jhkd5\" (UID: \"b9f9f092-f6ef-4316-bc6a-71bdee41cec3\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.397760 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b4091c4b-edf7-435c-9fbd-450e53dd9fed-scripts\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.397781 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sjbh\" (UniqueName: \"kubernetes.io/projected/b9f9f092-f6ef-4316-bc6a-71bdee41cec3-kube-api-access-8sjbh\") pod \"mysqld-exporter-openstack-cell1-db-create-jhkd5\" (UID: \"b9f9f092-f6ef-4316-bc6a-71bdee41cec3\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.398082 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-log-ovn\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.398205 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-run\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.398240 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-run-ovn\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.398663 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9f9f092-f6ef-4316-bc6a-71bdee41cec3-operator-scripts\") pod \"mysqld-exporter-openstack-cell1-db-create-jhkd5\" (UID: \"b9f9f092-f6ef-4316-bc6a-71bdee41cec3\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.401106 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b4091c4b-edf7-435c-9fbd-450e53dd9fed-scripts\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.404208 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Dec 10 00:54:27 crc 
kubenswrapper[4884]: I1210 00:54:27.408860 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b4091c4b-edf7-435c-9fbd-450e53dd9fed-additional-scripts\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.432207 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8887w\" (UniqueName: \"kubernetes.io/projected/b4091c4b-edf7-435c-9fbd-450e53dd9fed-kube-api-access-8887w\") pod \"ovn-controller-577tv-config-gnbfs\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.443542 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8sjbh\" (UniqueName: \"kubernetes.io/projected/b9f9f092-f6ef-4316-bc6a-71bdee41cec3-kube-api-access-8sjbh\") pod \"mysqld-exporter-openstack-cell1-db-create-jhkd5\" (UID: \"b9f9f092-f6ef-4316-bc6a-71bdee41cec3\") " pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.454063 4884 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","poda2b91caf-ce98-4997-9f34-62031f4fb1f3"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort poda2b91caf-ce98-4997-9f34-62031f4fb1f3] : Timed out while waiting for systemd to remove kubepods-besteffort-poda2b91caf_ce98_4997_9f34_62031f4fb1f3.slice" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.468135 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.503749 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b01476d4-2f54-49f9-a292-f4c39fcc215b-operator-scripts\") pod \"mysqld-exporter-7751-account-create-update-2bn8l\" (UID: \"b01476d4-2f54-49f9-a292-f4c39fcc215b\") " pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.503869 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wtjd\" (UniqueName: \"kubernetes.io/projected/b01476d4-2f54-49f9-a292-f4c39fcc215b-kube-api-access-2wtjd\") pod \"mysqld-exporter-7751-account-create-update-2bn8l\" (UID: \"b01476d4-2f54-49f9-a292-f4c39fcc215b\") " pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.507463 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.605683 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wtjd\" (UniqueName: \"kubernetes.io/projected/b01476d4-2f54-49f9-a292-f4c39fcc215b-kube-api-access-2wtjd\") pod \"mysqld-exporter-7751-account-create-update-2bn8l\" (UID: \"b01476d4-2f54-49f9-a292-f4c39fcc215b\") " pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.605829 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b01476d4-2f54-49f9-a292-f4c39fcc215b-operator-scripts\") pod \"mysqld-exporter-7751-account-create-update-2bn8l\" (UID: \"b01476d4-2f54-49f9-a292-f4c39fcc215b\") " pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.606576 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b01476d4-2f54-49f9-a292-f4c39fcc215b-operator-scripts\") pod \"mysqld-exporter-7751-account-create-update-2bn8l\" (UID: \"b01476d4-2f54-49f9-a292-f4c39fcc215b\") " pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.627778 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wtjd\" (UniqueName: \"kubernetes.io/projected/b01476d4-2f54-49f9-a292-f4c39fcc215b-kube-api-access-2wtjd\") pod \"mysqld-exporter-7751-account-create-update-2bn8l\" (UID: \"b01476d4-2f54-49f9-a292-f4c39fcc215b\") " pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" Dec 10 00:54:27 crc kubenswrapper[4884]: I1210 00:54:27.703734 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.086370 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5"] Dec 10 00:54:28 crc kubenswrapper[4884]: W1210 00:54:28.091741 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9f9f092_f6ef_4316_bc6a_71bdee41cec3.slice/crio-fedc93d5a9ff262c6dae4c54378161b6943b107a712678036cfbb3d2ab078051 WatchSource:0}: Error finding container fedc93d5a9ff262c6dae4c54378161b6943b107a712678036cfbb3d2ab078051: Status 404 returned error can't find the container with id fedc93d5a9ff262c6dae4c54378161b6943b107a712678036cfbb3d2ab078051 Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.170349 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-577tv-config-gnbfs"] Dec 10 00:54:28 crc kubenswrapper[4884]: W1210 00:54:28.183118 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4091c4b_edf7_435c_9fbd_450e53dd9fed.slice/crio-f7bd42cc78cc98f1d52d1cfed1963badd037385a938fc9def5bba895113c102b WatchSource:0}: Error finding container f7bd42cc78cc98f1d52d1cfed1963badd037385a938fc9def5bba895113c102b: Status 404 returned error can't find the container with id f7bd42cc78cc98f1d52d1cfed1963badd037385a938fc9def5bba895113c102b Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.613574 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-7751-account-create-update-2bn8l"] Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.767913 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-9058-account-create-update-sfww4" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.772155 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-ad48-account-create-update-rdb85" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.784337 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-sz9lb" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.790498 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-x6mx8" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.828701 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4gwh\" (UniqueName: \"kubernetes.io/projected/4b6ae20d-aec5-445c-82be-75954a52176b-kube-api-access-w4gwh\") pod \"4b6ae20d-aec5-445c-82be-75954a52176b\" (UID: \"4b6ae20d-aec5-445c-82be-75954a52176b\") " Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.828865 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b6ae20d-aec5-445c-82be-75954a52176b-operator-scripts\") pod \"4b6ae20d-aec5-445c-82be-75954a52176b\" (UID: \"4b6ae20d-aec5-445c-82be-75954a52176b\") " Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.829724 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b6ae20d-aec5-445c-82be-75954a52176b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4b6ae20d-aec5-445c-82be-75954a52176b" (UID: "4b6ae20d-aec5-445c-82be-75954a52176b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.836062 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b6ae20d-aec5-445c-82be-75954a52176b-kube-api-access-w4gwh" (OuterVolumeSpecName: "kube-api-access-w4gwh") pod "4b6ae20d-aec5-445c-82be-75954a52176b" (UID: "4b6ae20d-aec5-445c-82be-75954a52176b"). InnerVolumeSpecName "kube-api-access-w4gwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.919332 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" event={"ID":"b01476d4-2f54-49f9-a292-f4c39fcc215b","Type":"ContainerStarted","Data":"c0fcd1f9d043c430ad2cddffe32e287ca338c67eac269cc90e1d7eae365f13aa"} Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.919376 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" event={"ID":"b01476d4-2f54-49f9-a292-f4c39fcc215b","Type":"ContainerStarted","Data":"1a58275d4045513b088827061681dfabf81c45d145e276a29a244c455894b1dd"} Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.922500 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-ad48-account-create-update-rdb85" event={"ID":"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a","Type":"ContainerDied","Data":"b420b5d3ba7fabb573c0a7f7bd10af14f7c091192de801fd84583ce7513b5543"} Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.922535 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b420b5d3ba7fabb573c0a7f7bd10af14f7c091192de801fd84583ce7513b5543" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.922628 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-ad48-account-create-update-rdb85" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.927939 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-9058-account-create-update-sfww4" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.927934 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-9058-account-create-update-sfww4" event={"ID":"4b6ae20d-aec5-445c-82be-75954a52176b","Type":"ContainerDied","Data":"7355487718b73b32e4835a0b6e6e489ad2ddeea408ac9845b3b5811139ccf3a6"} Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.928077 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7355487718b73b32e4835a0b6e6e489ad2ddeea408ac9845b3b5811139ccf3a6" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.929973 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75856\" (UniqueName: \"kubernetes.io/projected/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a-kube-api-access-75856\") pod \"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a\" (UID: \"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a\") " Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.930005 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pd55p\" (UniqueName: \"kubernetes.io/projected/7f111e4f-54f4-4964-8ccf-d6336884eeec-kube-api-access-pd55p\") pod \"7f111e4f-54f4-4964-8ccf-d6336884eeec\" (UID: \"7f111e4f-54f4-4964-8ccf-d6336884eeec\") " Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.930107 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6msll\" (UniqueName: \"kubernetes.io/projected/6df655e4-396f-4fc7-8b31-ae3ee55134f2-kube-api-access-6msll\") pod \"6df655e4-396f-4fc7-8b31-ae3ee55134f2\" (UID: \"6df655e4-396f-4fc7-8b31-ae3ee55134f2\") " Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.930138 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6df655e4-396f-4fc7-8b31-ae3ee55134f2-operator-scripts\") pod \"6df655e4-396f-4fc7-8b31-ae3ee55134f2\" (UID: \"6df655e4-396f-4fc7-8b31-ae3ee55134f2\") " Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.930179 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a-operator-scripts\") pod \"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a\" (UID: \"b6e587fb-bea6-4152-8dbe-b8aa7c6e203a\") " Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.930201 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f111e4f-54f4-4964-8ccf-d6336884eeec-operator-scripts\") pod \"7f111e4f-54f4-4964-8ccf-d6336884eeec\" (UID: \"7f111e4f-54f4-4964-8ccf-d6336884eeec\") " Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.930593 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4gwh\" (UniqueName: \"kubernetes.io/projected/4b6ae20d-aec5-445c-82be-75954a52176b-kube-api-access-w4gwh\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.930605 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b6ae20d-aec5-445c-82be-75954a52176b-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.931072 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/7f111e4f-54f4-4964-8ccf-d6336884eeec-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7f111e4f-54f4-4964-8ccf-d6336884eeec" (UID: "7f111e4f-54f4-4964-8ccf-d6336884eeec"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.931270 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-x6mx8" event={"ID":"7f111e4f-54f4-4964-8ccf-d6336884eeec","Type":"ContainerDied","Data":"888427a89d1094a88cd9d0d3354cd84b4fc6535220b85715be3a729244914454"} Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.931338 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="888427a89d1094a88cd9d0d3354cd84b4fc6535220b85715be3a729244914454" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.931313 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6df655e4-396f-4fc7-8b31-ae3ee55134f2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6df655e4-396f-4fc7-8b31-ae3ee55134f2" (UID: "6df655e4-396f-4fc7-8b31-ae3ee55134f2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.931390 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-x6mx8" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.931528 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b6e587fb-bea6-4152-8dbe-b8aa7c6e203a" (UID: "b6e587fb-bea6-4152-8dbe-b8aa7c6e203a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.933118 4884 generic.go:334] "Generic (PLEG): container finished" podID="b9f9f092-f6ef-4316-bc6a-71bdee41cec3" containerID="69421897fee06d879786d40a3e2b8304efae29600cc308dbed8b68d72c7a9247" exitCode=0 Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.933165 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" event={"ID":"b9f9f092-f6ef-4316-bc6a-71bdee41cec3","Type":"ContainerDied","Data":"69421897fee06d879786d40a3e2b8304efae29600cc308dbed8b68d72c7a9247"} Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.933180 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" event={"ID":"b9f9f092-f6ef-4316-bc6a-71bdee41cec3","Type":"ContainerStarted","Data":"fedc93d5a9ff262c6dae4c54378161b6943b107a712678036cfbb3d2ab078051"} Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.937968 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a-kube-api-access-75856" (OuterVolumeSpecName: "kube-api-access-75856") pod "b6e587fb-bea6-4152-8dbe-b8aa7c6e203a" (UID: "b6e587fb-bea6-4152-8dbe-b8aa7c6e203a"). InnerVolumeSpecName "kube-api-access-75856". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.938014 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6df655e4-396f-4fc7-8b31-ae3ee55134f2-kube-api-access-6msll" (OuterVolumeSpecName: "kube-api-access-6msll") pod "6df655e4-396f-4fc7-8b31-ae3ee55134f2" (UID: "6df655e4-396f-4fc7-8b31-ae3ee55134f2"). InnerVolumeSpecName "kube-api-access-6msll". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.938037 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f111e4f-54f4-4964-8ccf-d6336884eeec-kube-api-access-pd55p" (OuterVolumeSpecName: "kube-api-access-pd55p") pod "7f111e4f-54f4-4964-8ccf-d6336884eeec" (UID: "7f111e4f-54f4-4964-8ccf-d6336884eeec"). InnerVolumeSpecName "kube-api-access-pd55p". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.940094 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-sz9lb" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.940135 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-sz9lb" event={"ID":"6df655e4-396f-4fc7-8b31-ae3ee55134f2","Type":"ContainerDied","Data":"0194396c22131db01980d9ea427639aa5c1400ed5f74ab3de837a4373becdb0d"} Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.940167 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0194396c22131db01980d9ea427639aa5c1400ed5f74ab3de837a4373becdb0d" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.942567 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-577tv-config-gnbfs" event={"ID":"b4091c4b-edf7-435c-9fbd-450e53dd9fed","Type":"ContainerStarted","Data":"e2c3c1f94be9b1b02accfe5e6b6384942ba2fedea4b7602541e3ffaaa879fcac"} Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.942593 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-577tv-config-gnbfs" event={"ID":"b4091c4b-edf7-435c-9fbd-450e53dd9fed","Type":"ContainerStarted","Data":"f7bd42cc78cc98f1d52d1cfed1963badd037385a938fc9def5bba895113c102b"} Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.946513 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" podStartSLOduration=1.946497732 podStartE2EDuration="1.946497732s" podCreationTimestamp="2025-12-10 00:54:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:54:28.938241028 +0000 UTC m=+1442.016198155" watchObservedRunningTime="2025-12-10 00:54:28.946497732 +0000 UTC m=+1442.024454849" Dec 10 00:54:28 crc kubenswrapper[4884]: I1210 00:54:28.974146 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-577tv-config-gnbfs" podStartSLOduration=1.97413023 podStartE2EDuration="1.97413023s" podCreationTimestamp="2025-12-10 00:54:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:54:28.970761929 +0000 UTC m=+1442.048719046" watchObservedRunningTime="2025-12-10 00:54:28.97413023 +0000 UTC m=+1442.052087347" Dec 10 00:54:29 crc kubenswrapper[4884]: I1210 
00:54:29.032566 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75856\" (UniqueName: \"kubernetes.io/projected/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a-kube-api-access-75856\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:29 crc kubenswrapper[4884]: I1210 00:54:29.032821 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pd55p\" (UniqueName: \"kubernetes.io/projected/7f111e4f-54f4-4964-8ccf-d6336884eeec-kube-api-access-pd55p\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:29 crc kubenswrapper[4884]: I1210 00:54:29.032832 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6msll\" (UniqueName: \"kubernetes.io/projected/6df655e4-396f-4fc7-8b31-ae3ee55134f2-kube-api-access-6msll\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:29 crc kubenswrapper[4884]: I1210 00:54:29.032840 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6df655e4-396f-4fc7-8b31-ae3ee55134f2-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:29 crc kubenswrapper[4884]: I1210 00:54:29.032850 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:29 crc kubenswrapper[4884]: I1210 00:54:29.032861 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f111e4f-54f4-4964-8ccf-d6336884eeec-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:29 crc kubenswrapper[4884]: I1210 00:54:29.953373 4884 generic.go:334] "Generic (PLEG): container finished" podID="b01476d4-2f54-49f9-a292-f4c39fcc215b" containerID="c0fcd1f9d043c430ad2cddffe32e287ca338c67eac269cc90e1d7eae365f13aa" exitCode=0 Dec 10 00:54:29 crc kubenswrapper[4884]: I1210 00:54:29.953434 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" event={"ID":"b01476d4-2f54-49f9-a292-f4c39fcc215b","Type":"ContainerDied","Data":"c0fcd1f9d043c430ad2cddffe32e287ca338c67eac269cc90e1d7eae365f13aa"} Dec 10 00:54:29 crc kubenswrapper[4884]: I1210 00:54:29.957672 4884 generic.go:334] "Generic (PLEG): container finished" podID="b4091c4b-edf7-435c-9fbd-450e53dd9fed" containerID="e2c3c1f94be9b1b02accfe5e6b6384942ba2fedea4b7602541e3ffaaa879fcac" exitCode=0 Dec 10 00:54:29 crc kubenswrapper[4884]: I1210 00:54:29.957833 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-577tv-config-gnbfs" event={"ID":"b4091c4b-edf7-435c-9fbd-450e53dd9fed","Type":"ContainerDied","Data":"e2c3c1f94be9b1b02accfe5e6b6384942ba2fedea4b7602541e3ffaaa879fcac"} Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.160091 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.173586 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/77e9a322-ea03-4101-b8be-d1e09f67e8c2-etc-swift\") pod \"swift-storage-0\" (UID: \"77e9a322-ea03-4101-b8be-d1e09f67e8c2\") " pod="openstack/swift-storage-0" Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.235979 4884 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.335920 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.464741 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9f9f092-f6ef-4316-bc6a-71bdee41cec3-operator-scripts\") pod \"b9f9f092-f6ef-4316-bc6a-71bdee41cec3\" (UID: \"b9f9f092-f6ef-4316-bc6a-71bdee41cec3\") " Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.464915 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8sjbh\" (UniqueName: \"kubernetes.io/projected/b9f9f092-f6ef-4316-bc6a-71bdee41cec3-kube-api-access-8sjbh\") pod \"b9f9f092-f6ef-4316-bc6a-71bdee41cec3\" (UID: \"b9f9f092-f6ef-4316-bc6a-71bdee41cec3\") " Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.465464 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9f9f092-f6ef-4316-bc6a-71bdee41cec3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b9f9f092-f6ef-4316-bc6a-71bdee41cec3" (UID: "b9f9f092-f6ef-4316-bc6a-71bdee41cec3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.470073 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9f9f092-f6ef-4316-bc6a-71bdee41cec3-kube-api-access-8sjbh" (OuterVolumeSpecName: "kube-api-access-8sjbh") pod "b9f9f092-f6ef-4316-bc6a-71bdee41cec3" (UID: "b9f9f092-f6ef-4316-bc6a-71bdee41cec3"). InnerVolumeSpecName "kube-api-access-8sjbh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.567286 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b9f9f092-f6ef-4316-bc6a-71bdee41cec3-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.567333 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8sjbh\" (UniqueName: \"kubernetes.io/projected/b9f9f092-f6ef-4316-bc6a-71bdee41cec3-kube-api-access-8sjbh\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.752931 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 10 00:54:30 crc kubenswrapper[4884]: W1210 00:54:30.759545 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77e9a322_ea03_4101_b8be_d1e09f67e8c2.slice/crio-9c8a52ac6834a30d7242c34bf8e1b0b97225050da7c9fc02a25d4bc831912509 WatchSource:0}: Error finding container 9c8a52ac6834a30d7242c34bf8e1b0b97225050da7c9fc02a25d4bc831912509: Status 404 returned error can't find the container with id 9c8a52ac6834a30d7242c34bf8e1b0b97225050da7c9fc02a25d4bc831912509 Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.975762 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" event={"ID":"b9f9f092-f6ef-4316-bc6a-71bdee41cec3","Type":"ContainerDied","Data":"fedc93d5a9ff262c6dae4c54378161b6943b107a712678036cfbb3d2ab078051"} Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.976034 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fedc93d5a9ff262c6dae4c54378161b6943b107a712678036cfbb3d2ab078051" Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.975850 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5" Dec 10 00:54:30 crc kubenswrapper[4884]: I1210 00:54:30.978264 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"9c8a52ac6834a30d7242c34bf8e1b0b97225050da7c9fc02a25d4bc831912509"} Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.417666 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.528445 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.534938 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.589520 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-run-ovn\") pod \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.589570 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wtjd\" (UniqueName: \"kubernetes.io/projected/b01476d4-2f54-49f9-a292-f4c39fcc215b-kube-api-access-2wtjd\") pod \"b01476d4-2f54-49f9-a292-f4c39fcc215b\" (UID: \"b01476d4-2f54-49f9-a292-f4c39fcc215b\") " Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.589622 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b4091c4b-edf7-435c-9fbd-450e53dd9fed-additional-scripts\") pod \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.589640 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8887w\" (UniqueName: \"kubernetes.io/projected/b4091c4b-edf7-435c-9fbd-450e53dd9fed-kube-api-access-8887w\") pod \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.589655 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "b4091c4b-edf7-435c-9fbd-450e53dd9fed" (UID: "b4091c4b-edf7-435c-9fbd-450e53dd9fed"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.589687 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b01476d4-2f54-49f9-a292-f4c39fcc215b-operator-scripts\") pod \"b01476d4-2f54-49f9-a292-f4c39fcc215b\" (UID: \"b01476d4-2f54-49f9-a292-f4c39fcc215b\") " Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.589880 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-log-ovn\") pod \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.589925 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "b4091c4b-edf7-435c-9fbd-450e53dd9fed" (UID: "b4091c4b-edf7-435c-9fbd-450e53dd9fed"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.589944 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-run\") pod \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.590000 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b4091c4b-edf7-435c-9fbd-450e53dd9fed-scripts\") pod \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\" (UID: \"b4091c4b-edf7-435c-9fbd-450e53dd9fed\") " Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.590071 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-run" (OuterVolumeSpecName: "var-run") pod "b4091c4b-edf7-435c-9fbd-450e53dd9fed" (UID: "b4091c4b-edf7-435c-9fbd-450e53dd9fed"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.590552 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b01476d4-2f54-49f9-a292-f4c39fcc215b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b01476d4-2f54-49f9-a292-f4c39fcc215b" (UID: "b01476d4-2f54-49f9-a292-f4c39fcc215b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.590644 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4091c4b-edf7-435c-9fbd-450e53dd9fed-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "b4091c4b-edf7-435c-9fbd-450e53dd9fed" (UID: "b4091c4b-edf7-435c-9fbd-450e53dd9fed"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.590662 4884 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-log-ovn\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.590681 4884 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-run\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.590692 4884 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b4091c4b-edf7-435c-9fbd-450e53dd9fed-var-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.590703 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b01476d4-2f54-49f9-a292-f4c39fcc215b-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.591620 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4091c4b-edf7-435c-9fbd-450e53dd9fed-scripts" (OuterVolumeSpecName: "scripts") pod "b4091c4b-edf7-435c-9fbd-450e53dd9fed" (UID: "b4091c4b-edf7-435c-9fbd-450e53dd9fed"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.597414 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4091c4b-edf7-435c-9fbd-450e53dd9fed-kube-api-access-8887w" (OuterVolumeSpecName: "kube-api-access-8887w") pod "b4091c4b-edf7-435c-9fbd-450e53dd9fed" (UID: "b4091c4b-edf7-435c-9fbd-450e53dd9fed"). InnerVolumeSpecName "kube-api-access-8887w". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.602590 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b01476d4-2f54-49f9-a292-f4c39fcc215b-kube-api-access-2wtjd" (OuterVolumeSpecName: "kube-api-access-2wtjd") pod "b01476d4-2f54-49f9-a292-f4c39fcc215b" (UID: "b01476d4-2f54-49f9-a292-f4c39fcc215b"). InnerVolumeSpecName "kube-api-access-2wtjd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.692191 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b4091c4b-edf7-435c-9fbd-450e53dd9fed-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.692226 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wtjd\" (UniqueName: \"kubernetes.io/projected/b01476d4-2f54-49f9-a292-f4c39fcc215b-kube-api-access-2wtjd\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.692237 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8887w\" (UniqueName: \"kubernetes.io/projected/b4091c4b-edf7-435c-9fbd-450e53dd9fed-kube-api-access-8887w\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.692245 4884 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/b4091c4b-edf7-435c-9fbd-450e53dd9fed-additional-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.860641 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.996285 4884 generic.go:334] "Generic (PLEG): container finished" podID="9879cc81-0cad-4e77-90e5-46afd9adb241" containerID="ff6db0f9b13951824e620b61c2d4f0157276e91bb1bff3a87e9a00affaf2f402" exitCode=0 Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.996350 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9879cc81-0cad-4e77-90e5-46afd9adb241","Type":"ContainerDied","Data":"ff6db0f9b13951824e620b61c2d4f0157276e91bb1bff3a87e9a00affaf2f402"} Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.998492 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-577tv-config-gnbfs" event={"ID":"b4091c4b-edf7-435c-9fbd-450e53dd9fed","Type":"ContainerDied","Data":"f7bd42cc78cc98f1d52d1cfed1963badd037385a938fc9def5bba895113c102b"} Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.998523 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7bd42cc78cc98f1d52d1cfed1963badd037385a938fc9def5bba895113c102b" Dec 10 00:54:31 crc kubenswrapper[4884]: I1210 00:54:31.998582 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-577tv-config-gnbfs" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.006795 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" event={"ID":"b01476d4-2f54-49f9-a292-f4c39fcc215b","Type":"ContainerDied","Data":"1a58275d4045513b088827061681dfabf81c45d145e276a29a244c455894b1dd"} Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.006839 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a58275d4045513b088827061681dfabf81c45d145e276a29a244c455894b1dd" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.006843 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-7751-account-create-update-2bn8l" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578146 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 00:54:32 crc kubenswrapper[4884]: E1210 00:54:32.578553 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b6ae20d-aec5-445c-82be-75954a52176b" containerName="mariadb-account-create-update" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578577 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b6ae20d-aec5-445c-82be-75954a52176b" containerName="mariadb-account-create-update" Dec 10 00:54:32 crc kubenswrapper[4884]: E1210 00:54:32.578588 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4091c4b-edf7-435c-9fbd-450e53dd9fed" containerName="ovn-config" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578596 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4091c4b-edf7-435c-9fbd-450e53dd9fed" containerName="ovn-config" Dec 10 00:54:32 crc kubenswrapper[4884]: E1210 00:54:32.578619 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b01476d4-2f54-49f9-a292-f4c39fcc215b" containerName="mariadb-account-create-update" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578626 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b01476d4-2f54-49f9-a292-f4c39fcc215b" containerName="mariadb-account-create-update" Dec 10 00:54:32 crc kubenswrapper[4884]: E1210 00:54:32.578636 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6df655e4-396f-4fc7-8b31-ae3ee55134f2" containerName="mariadb-database-create" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578643 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6df655e4-396f-4fc7-8b31-ae3ee55134f2" containerName="mariadb-database-create" Dec 10 00:54:32 crc kubenswrapper[4884]: E1210 00:54:32.578656 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9f9f092-f6ef-4316-bc6a-71bdee41cec3" containerName="mariadb-database-create" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578663 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9f9f092-f6ef-4316-bc6a-71bdee41cec3" containerName="mariadb-database-create" Dec 10 00:54:32 crc kubenswrapper[4884]: E1210 00:54:32.578672 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6e587fb-bea6-4152-8dbe-b8aa7c6e203a" containerName="mariadb-account-create-update" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578679 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6e587fb-bea6-4152-8dbe-b8aa7c6e203a" containerName="mariadb-account-create-update" Dec 10 00:54:32 crc kubenswrapper[4884]: E1210 00:54:32.578690 4884 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f111e4f-54f4-4964-8ccf-d6336884eeec" containerName="mariadb-database-create"
Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578696 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f111e4f-54f4-4964-8ccf-d6336884eeec" containerName="mariadb-database-create"
Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578846 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f111e4f-54f4-4964-8ccf-d6336884eeec" containerName="mariadb-database-create"
Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578858 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9f9f092-f6ef-4316-bc6a-71bdee41cec3" containerName="mariadb-database-create"
Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578870 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4091c4b-edf7-435c-9fbd-450e53dd9fed" containerName="ovn-config"
Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578882 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b01476d4-2f54-49f9-a292-f4c39fcc215b" containerName="mariadb-account-create-update"
Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578897 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b6ae20d-aec5-445c-82be-75954a52176b" containerName="mariadb-account-create-update"
Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578903 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6df655e4-396f-4fc7-8b31-ae3ee55134f2" containerName="mariadb-database-create"
Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.578910 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6e587fb-bea6-4152-8dbe-b8aa7c6e203a" containerName="mariadb-account-create-update"
Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.579530 4884 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.583568 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.595393 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.612919 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\") " pod="openstack/mysqld-exporter-0" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.612987 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-config-data\") pod \"mysqld-exporter-0\" (UID: \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\") " pod="openstack/mysqld-exporter-0" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.613090 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9npz7\" (UniqueName: \"kubernetes.io/projected/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-kube-api-access-9npz7\") pod \"mysqld-exporter-0\" (UID: \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\") " pod="openstack/mysqld-exporter-0" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.716622 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\") " pod="openstack/mysqld-exporter-0" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.716683 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-config-data\") pod \"mysqld-exporter-0\" (UID: \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\") " pod="openstack/mysqld-exporter-0" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.716787 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9npz7\" (UniqueName: \"kubernetes.io/projected/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-kube-api-access-9npz7\") pod \"mysqld-exporter-0\" (UID: \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\") " pod="openstack/mysqld-exporter-0" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.729552 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-577tv-config-gnbfs"] Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.748256 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-577tv-config-gnbfs"] Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.781495 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-config-data\") pod \"mysqld-exporter-0\" (UID: \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\") " pod="openstack/mysqld-exporter-0" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.782686 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\") " pod="openstack/mysqld-exporter-0" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.788197 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9npz7\" (UniqueName: \"kubernetes.io/projected/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-kube-api-access-9npz7\") pod \"mysqld-exporter-0\" (UID: \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\") " pod="openstack/mysqld-exporter-0" Dec 10 00:54:32 crc kubenswrapper[4884]: I1210 00:54:32.896279 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.018846 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9879cc81-0cad-4e77-90e5-46afd9adb241","Type":"ContainerStarted","Data":"2bf626faaf8489358763c906b30baa55488e261752c25905a11d2373684752fe"} Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.301862 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4091c4b-edf7-435c-9fbd-450e53dd9fed" path="/var/lib/kubelet/pods/b4091c4b-edf7-435c-9fbd-450e53dd9fed/volumes" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.669812 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-pm6qf"] Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.671467 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-pm6qf" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.707498 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-0b32-account-create-update-lb248"] Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.715023 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-0b32-account-create-update-lb248" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.717985 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.739904 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43-operator-scripts\") pod \"heat-0b32-account-create-update-lb248\" (UID: \"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43\") " pod="openstack/heat-0b32-account-create-update-lb248" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.740004 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6n2r\" (UniqueName: \"kubernetes.io/projected/9ee033e3-bd08-4fe7-8efb-d6b81e79796c-kube-api-access-l6n2r\") pod \"heat-db-create-pm6qf\" (UID: \"9ee033e3-bd08-4fe7-8efb-d6b81e79796c\") " pod="openstack/heat-db-create-pm6qf" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.740758 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-47z54\" (UniqueName: \"kubernetes.io/projected/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43-kube-api-access-47z54\") pod \"heat-0b32-account-create-update-lb248\" (UID: \"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43\") " pod="openstack/heat-0b32-account-create-update-lb248" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.740819 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ee033e3-bd08-4fe7-8efb-d6b81e79796c-operator-scripts\") pod \"heat-db-create-pm6qf\" (UID: \"9ee033e3-bd08-4fe7-8efb-d6b81e79796c\") " pod="openstack/heat-db-create-pm6qf" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.754577 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-pm6qf"] Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.779501 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-0b32-account-create-update-lb248"] Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.820733 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.830641 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-xwwl8"] Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.832976 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-xwwl8" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.845598 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43-operator-scripts\") pod \"heat-0b32-account-create-update-lb248\" (UID: \"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43\") " pod="openstack/heat-0b32-account-create-update-lb248" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.845671 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6n2r\" (UniqueName: \"kubernetes.io/projected/9ee033e3-bd08-4fe7-8efb-d6b81e79796c-kube-api-access-l6n2r\") pod \"heat-db-create-pm6qf\" (UID: \"9ee033e3-bd08-4fe7-8efb-d6b81e79796c\") " pod="openstack/heat-db-create-pm6qf" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.845729 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-47z54\" (UniqueName: \"kubernetes.io/projected/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43-kube-api-access-47z54\") pod \"heat-0b32-account-create-update-lb248\" (UID: \"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43\") " pod="openstack/heat-0b32-account-create-update-lb248" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.845756 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ee033e3-bd08-4fe7-8efb-d6b81e79796c-operator-scripts\") pod \"heat-db-create-pm6qf\" (UID: \"9ee033e3-bd08-4fe7-8efb-d6b81e79796c\") " pod="openstack/heat-db-create-pm6qf" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.846626 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ee033e3-bd08-4fe7-8efb-d6b81e79796c-operator-scripts\") pod \"heat-db-create-pm6qf\" (UID: \"9ee033e3-bd08-4fe7-8efb-d6b81e79796c\") " pod="openstack/heat-db-create-pm6qf" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.846695 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43-operator-scripts\") pod \"heat-0b32-account-create-update-lb248\" (UID: \"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43\") " pod="openstack/heat-0b32-account-create-update-lb248" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.854618 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-4150-account-create-update-pfkjg"] Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.856242 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-4150-account-create-update-pfkjg" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.859005 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.881974 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-xwwl8"] Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.883207 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6n2r\" (UniqueName: \"kubernetes.io/projected/9ee033e3-bd08-4fe7-8efb-d6b81e79796c-kube-api-access-l6n2r\") pod \"heat-db-create-pm6qf\" (UID: \"9ee033e3-bd08-4fe7-8efb-d6b81e79796c\") " pod="openstack/heat-db-create-pm6qf" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.883219 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-47z54\" (UniqueName: \"kubernetes.io/projected/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43-kube-api-access-47z54\") pod \"heat-0b32-account-create-update-lb248\" (UID: \"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43\") " pod="openstack/heat-0b32-account-create-update-lb248" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.903391 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-4150-account-create-update-pfkjg"] Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.947562 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f5b54dd3-a06f-4c77-88d4-88417c5d43eb-operator-scripts\") pod \"barbican-db-create-xwwl8\" (UID: \"f5b54dd3-a06f-4c77-88d4-88417c5d43eb\") " pod="openstack/barbican-db-create-xwwl8" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.947624 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmxhp\" (UniqueName: \"kubernetes.io/projected/f5b54dd3-a06f-4c77-88d4-88417c5d43eb-kube-api-access-gmxhp\") pod \"barbican-db-create-xwwl8\" (UID: \"f5b54dd3-a06f-4c77-88d4-88417c5d43eb\") " pod="openstack/barbican-db-create-xwwl8" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.947688 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67087c97-ae2d-400e-b3a4-455c5eb8f082-operator-scripts\") pod \"cinder-4150-account-create-update-pfkjg\" (UID: \"67087c97-ae2d-400e-b3a4-455c5eb8f082\") " pod="openstack/cinder-4150-account-create-update-pfkjg" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.947729 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvsmf\" (UniqueName: \"kubernetes.io/projected/67087c97-ae2d-400e-b3a4-455c5eb8f082-kube-api-access-pvsmf\") pod \"cinder-4150-account-create-update-pfkjg\" (UID: \"67087c97-ae2d-400e-b3a4-455c5eb8f082\") " pod="openstack/cinder-4150-account-create-update-pfkjg" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.957403 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-8xwrt"] Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.958598 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-8xwrt" Dec 10 00:54:33 crc kubenswrapper[4884]: I1210 00:54:33.967897 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-8xwrt"] Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.003880 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-6db2-account-create-update-m9tlk"] Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.005217 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6db2-account-create-update-m9tlk" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.007560 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.009920 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-pm6qf" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.014087 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-6db2-account-create-update-m9tlk"] Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.043737 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-0b32-account-create-update-lb248" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.049031 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67087c97-ae2d-400e-b3a4-455c5eb8f082-operator-scripts\") pod \"cinder-4150-account-create-update-pfkjg\" (UID: \"67087c97-ae2d-400e-b3a4-455c5eb8f082\") " pod="openstack/cinder-4150-account-create-update-pfkjg" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.049080 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8af1f893-43a8-4ab0-8b44-4f2dd24340b0-operator-scripts\") pod \"cinder-db-create-8xwrt\" (UID: \"8af1f893-43a8-4ab0-8b44-4f2dd24340b0\") " pod="openstack/cinder-db-create-8xwrt" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.049111 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvsmf\" (UniqueName: \"kubernetes.io/projected/67087c97-ae2d-400e-b3a4-455c5eb8f082-kube-api-access-pvsmf\") pod \"cinder-4150-account-create-update-pfkjg\" (UID: \"67087c97-ae2d-400e-b3a4-455c5eb8f082\") " pod="openstack/cinder-4150-account-create-update-pfkjg" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.049198 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f5b54dd3-a06f-4c77-88d4-88417c5d43eb-operator-scripts\") pod \"barbican-db-create-xwwl8\" (UID: \"f5b54dd3-a06f-4c77-88d4-88417c5d43eb\") " pod="openstack/barbican-db-create-xwwl8" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.049241 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmxhp\" (UniqueName: \"kubernetes.io/projected/f5b54dd3-a06f-4c77-88d4-88417c5d43eb-kube-api-access-gmxhp\") pod \"barbican-db-create-xwwl8\" (UID: \"f5b54dd3-a06f-4c77-88d4-88417c5d43eb\") " pod="openstack/barbican-db-create-xwwl8" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.049264 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/c2c2f4f5-5345-4503-be32-860ebcf42d21-operator-scripts\") pod \"barbican-6db2-account-create-update-m9tlk\" (UID: \"c2c2f4f5-5345-4503-be32-860ebcf42d21\") " pod="openstack/barbican-6db2-account-create-update-m9tlk" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.049303 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9v99h\" (UniqueName: \"kubernetes.io/projected/c2c2f4f5-5345-4503-be32-860ebcf42d21-kube-api-access-9v99h\") pod \"barbican-6db2-account-create-update-m9tlk\" (UID: \"c2c2f4f5-5345-4503-be32-860ebcf42d21\") " pod="openstack/barbican-6db2-account-create-update-m9tlk" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.049324 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjwcn\" (UniqueName: \"kubernetes.io/projected/8af1f893-43a8-4ab0-8b44-4f2dd24340b0-kube-api-access-sjwcn\") pod \"cinder-db-create-8xwrt\" (UID: \"8af1f893-43a8-4ab0-8b44-4f2dd24340b0\") " pod="openstack/cinder-db-create-8xwrt" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.050036 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67087c97-ae2d-400e-b3a4-455c5eb8f082-operator-scripts\") pod \"cinder-4150-account-create-update-pfkjg\" (UID: \"67087c97-ae2d-400e-b3a4-455c5eb8f082\") " pod="openstack/cinder-4150-account-create-update-pfkjg" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.050709 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f5b54dd3-a06f-4c77-88d4-88417c5d43eb-operator-scripts\") pod \"barbican-db-create-xwwl8\" (UID: \"f5b54dd3-a06f-4c77-88d4-88417c5d43eb\") " pod="openstack/barbican-db-create-xwwl8" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.071860 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-rx8lw"] Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.073291 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-rx8lw" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.084744 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-rx8lw"] Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.087418 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmxhp\" (UniqueName: \"kubernetes.io/projected/f5b54dd3-a06f-4c77-88d4-88417c5d43eb-kube-api-access-gmxhp\") pod \"barbican-db-create-xwwl8\" (UID: \"f5b54dd3-a06f-4c77-88d4-88417c5d43eb\") " pod="openstack/barbican-db-create-xwwl8" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.096122 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvsmf\" (UniqueName: \"kubernetes.io/projected/67087c97-ae2d-400e-b3a4-455c5eb8f082-kube-api-access-pvsmf\") pod \"cinder-4150-account-create-update-pfkjg\" (UID: \"67087c97-ae2d-400e-b3a4-455c5eb8f082\") " pod="openstack/cinder-4150-account-create-update-pfkjg" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.103430 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-4150-account-create-update-pfkjg" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.104020 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7788-account-create-update-zwq2d"] Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.105399 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7788-account-create-update-zwq2d" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.107426 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4","Type":"ContainerStarted","Data":"9331ae8a79ec6b3dc1c183e568778d9b3ee0603a8f68db5d1a1e12bde91c198a"} Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.116385 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.149801 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"5a298aca589731b0551d6d697c5c8a56fbf1136da501e4ad6af505a1c3bef2a5"} Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.149845 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"0faae54abe20ff66628f39f2ca9ab2d764940cd09c67169e1fa8cb0ba0740f1f"} Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.149856 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"347039b7210eef2aa2126a5c547de5f848cb47a63dc48c8cd22b0803f4eb5cfd"} Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.150544 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9v99h\" (UniqueName: \"kubernetes.io/projected/c2c2f4f5-5345-4503-be32-860ebcf42d21-kube-api-access-9v99h\") pod \"barbican-6db2-account-create-update-m9tlk\" (UID: \"c2c2f4f5-5345-4503-be32-860ebcf42d21\") " pod="openstack/barbican-6db2-account-create-update-m9tlk" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.150575 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjwcn\" (UniqueName: \"kubernetes.io/projected/8af1f893-43a8-4ab0-8b44-4f2dd24340b0-kube-api-access-sjwcn\") pod \"cinder-db-create-8xwrt\" (UID: \"8af1f893-43a8-4ab0-8b44-4f2dd24340b0\") " pod="openstack/cinder-db-create-8xwrt" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.150634 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkbpx\" (UniqueName: \"kubernetes.io/projected/2e1759e5-b6a9-4815-b344-bc94fb621f14-kube-api-access-xkbpx\") pod \"neutron-db-create-rx8lw\" (UID: \"2e1759e5-b6a9-4815-b344-bc94fb621f14\") " pod="openstack/neutron-db-create-rx8lw" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.150656 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8af1f893-43a8-4ab0-8b44-4f2dd24340b0-operator-scripts\") pod \"cinder-db-create-8xwrt\" (UID: \"8af1f893-43a8-4ab0-8b44-4f2dd24340b0\") " pod="openstack/cinder-db-create-8xwrt" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.150703 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdzt5\" (UniqueName: \"kubernetes.io/projected/b0386097-ea50-4085-a9f0-7fcde7163b25-kube-api-access-hdzt5\") pod \"neutron-7788-account-create-update-zwq2d\" (UID: \"b0386097-ea50-4085-a9f0-7fcde7163b25\") " pod="openstack/neutron-7788-account-create-update-zwq2d" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.150737 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e1759e5-b6a9-4815-b344-bc94fb621f14-operator-scripts\") pod \"neutron-db-create-rx8lw\" (UID: \"2e1759e5-b6a9-4815-b344-bc94fb621f14\") " pod="openstack/neutron-db-create-rx8lw" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.150805 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2c2f4f5-5345-4503-be32-860ebcf42d21-operator-scripts\") pod \"barbican-6db2-account-create-update-m9tlk\" (UID: \"c2c2f4f5-5345-4503-be32-860ebcf42d21\") " pod="openstack/barbican-6db2-account-create-update-m9tlk" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.150839 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0386097-ea50-4085-a9f0-7fcde7163b25-operator-scripts\") pod \"neutron-7788-account-create-update-zwq2d\" (UID: \"b0386097-ea50-4085-a9f0-7fcde7163b25\") " pod="openstack/neutron-7788-account-create-update-zwq2d" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.151933 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8af1f893-43a8-4ab0-8b44-4f2dd24340b0-operator-scripts\") pod \"cinder-db-create-8xwrt\" (UID: \"8af1f893-43a8-4ab0-8b44-4f2dd24340b0\") " pod="openstack/cinder-db-create-8xwrt" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.154682 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2c2f4f5-5345-4503-be32-860ebcf42d21-operator-scripts\") pod \"barbican-6db2-account-create-update-m9tlk\" (UID: \"c2c2f4f5-5345-4503-be32-860ebcf42d21\") " pod="openstack/barbican-6db2-account-create-update-m9tlk" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.158488 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7788-account-create-update-zwq2d"] Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.173950 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjwcn\" (UniqueName: \"kubernetes.io/projected/8af1f893-43a8-4ab0-8b44-4f2dd24340b0-kube-api-access-sjwcn\") pod \"cinder-db-create-8xwrt\" (UID: \"8af1f893-43a8-4ab0-8b44-4f2dd24340b0\") " pod="openstack/cinder-db-create-8xwrt" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.253396 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e1759e5-b6a9-4815-b344-bc94fb621f14-operator-scripts\") pod \"neutron-db-create-rx8lw\" (UID: \"2e1759e5-b6a9-4815-b344-bc94fb621f14\") " pod="openstack/neutron-db-create-rx8lw" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.253877 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/b0386097-ea50-4085-a9f0-7fcde7163b25-operator-scripts\") pod \"neutron-7788-account-create-update-zwq2d\" (UID: \"b0386097-ea50-4085-a9f0-7fcde7163b25\") " pod="openstack/neutron-7788-account-create-update-zwq2d" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.253944 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkbpx\" (UniqueName: \"kubernetes.io/projected/2e1759e5-b6a9-4815-b344-bc94fb621f14-kube-api-access-xkbpx\") pod \"neutron-db-create-rx8lw\" (UID: \"2e1759e5-b6a9-4815-b344-bc94fb621f14\") " pod="openstack/neutron-db-create-rx8lw" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.253984 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdzt5\" (UniqueName: \"kubernetes.io/projected/b0386097-ea50-4085-a9f0-7fcde7163b25-kube-api-access-hdzt5\") pod \"neutron-7788-account-create-update-zwq2d\" (UID: \"b0386097-ea50-4085-a9f0-7fcde7163b25\") " pod="openstack/neutron-7788-account-create-update-zwq2d" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.254587 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e1759e5-b6a9-4815-b344-bc94fb621f14-operator-scripts\") pod \"neutron-db-create-rx8lw\" (UID: \"2e1759e5-b6a9-4815-b344-bc94fb621f14\") " pod="openstack/neutron-db-create-rx8lw" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.254992 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0386097-ea50-4085-a9f0-7fcde7163b25-operator-scripts\") pod \"neutron-7788-account-create-update-zwq2d\" (UID: \"b0386097-ea50-4085-a9f0-7fcde7163b25\") " pod="openstack/neutron-7788-account-create-update-zwq2d" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.271893 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9v99h\" (UniqueName: \"kubernetes.io/projected/c2c2f4f5-5345-4503-be32-860ebcf42d21-kube-api-access-9v99h\") pod \"barbican-6db2-account-create-update-m9tlk\" (UID: \"c2c2f4f5-5345-4503-be32-860ebcf42d21\") " pod="openstack/barbican-6db2-account-create-update-m9tlk" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.295572 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkbpx\" (UniqueName: \"kubernetes.io/projected/2e1759e5-b6a9-4815-b344-bc94fb621f14-kube-api-access-xkbpx\") pod \"neutron-db-create-rx8lw\" (UID: \"2e1759e5-b6a9-4815-b344-bc94fb621f14\") " pod="openstack/neutron-db-create-rx8lw" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.300983 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdzt5\" (UniqueName: \"kubernetes.io/projected/b0386097-ea50-4085-a9f0-7fcde7163b25-kube-api-access-hdzt5\") pod \"neutron-7788-account-create-update-zwq2d\" (UID: \"b0386097-ea50-4085-a9f0-7fcde7163b25\") " pod="openstack/neutron-7788-account-create-update-zwq2d" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.336205 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-xwwl8" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.416232 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-8xwrt" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.451197 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-6db2-account-create-update-m9tlk" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.462466 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-rx8lw" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.466918 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7788-account-create-update-zwq2d" Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.508031 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-0b32-account-create-update-lb248"] Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.755646 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-pm6qf"] Dec 10 00:54:34 crc kubenswrapper[4884]: I1210 00:54:34.945544 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-4150-account-create-update-pfkjg"] Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.019527 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-jczhh"] Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.020922 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-jczhh" Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.027986 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.030718 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.039502 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.039715 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8z4pv" Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.054414 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-jczhh"] Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.090737 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/290cb163-68fb-48f9-a3d1-695333c2499b-combined-ca-bundle\") pod \"keystone-db-sync-jczhh\" (UID: \"290cb163-68fb-48f9-a3d1-695333c2499b\") " pod="openstack/keystone-db-sync-jczhh" Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.090903 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/290cb163-68fb-48f9-a3d1-695333c2499b-config-data\") pod \"keystone-db-sync-jczhh\" (UID: \"290cb163-68fb-48f9-a3d1-695333c2499b\") " pod="openstack/keystone-db-sync-jczhh" Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.090946 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctcqk\" (UniqueName: \"kubernetes.io/projected/290cb163-68fb-48f9-a3d1-695333c2499b-kube-api-access-ctcqk\") pod \"keystone-db-sync-jczhh\" (UID: \"290cb163-68fb-48f9-a3d1-695333c2499b\") " pod="openstack/keystone-db-sync-jczhh" Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.160883 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-xwwl8"] Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.202991 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/290cb163-68fb-48f9-a3d1-695333c2499b-combined-ca-bundle\") pod \"keystone-db-sync-jczhh\" (UID: \"290cb163-68fb-48f9-a3d1-695333c2499b\") " pod="openstack/keystone-db-sync-jczhh"
Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.205944 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/290cb163-68fb-48f9-a3d1-695333c2499b-config-data\") pod \"keystone-db-sync-jczhh\" (UID: \"290cb163-68fb-48f9-a3d1-695333c2499b\") " pod="openstack/keystone-db-sync-jczhh"
Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.206078 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctcqk\" (UniqueName: \"kubernetes.io/projected/290cb163-68fb-48f9-a3d1-695333c2499b-kube-api-access-ctcqk\") pod \"keystone-db-sync-jczhh\" (UID: \"290cb163-68fb-48f9-a3d1-695333c2499b\") " pod="openstack/keystone-db-sync-jczhh"
Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.234952 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-0b32-account-create-update-lb248" event={"ID":"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43","Type":"ContainerStarted","Data":"cbe999af5c13574893266afeb7b7099e5a1b5531d84cefca50e767c2001a5c20"}
Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.258188 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-pm6qf" event={"ID":"9ee033e3-bd08-4fe7-8efb-d6b81e79796c","Type":"ContainerStarted","Data":"89086adadcab7ece8586ba72603970c224ca35ee59d5c3d45477ec11daaa4631"}
Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.266365 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"16cd035909820f09421385bfbdff53f76fa25ae7db23d603118203667a49890a"}
Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.267905 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4150-account-create-update-pfkjg" event={"ID":"67087c97-ae2d-400e-b3a4-455c5eb8f082","Type":"ContainerStarted","Data":"742fbab21225505849724bdc6a3b4cf5c1cd67e0d4ee7548eaa334aea9fd8a25"}
Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.486212 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/290cb163-68fb-48f9-a3d1-695333c2499b-combined-ca-bundle\") pod \"keystone-db-sync-jczhh\" (UID: \"290cb163-68fb-48f9-a3d1-695333c2499b\") " pod="openstack/keystone-db-sync-jczhh"
Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.487468 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/290cb163-68fb-48f9-a3d1-695333c2499b-config-data\") pod \"keystone-db-sync-jczhh\" (UID: \"290cb163-68fb-48f9-a3d1-695333c2499b\") " pod="openstack/keystone-db-sync-jczhh"
Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.491687 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctcqk\" (UniqueName: \"kubernetes.io/projected/290cb163-68fb-48f9-a3d1-695333c2499b-kube-api-access-ctcqk\") pod \"keystone-db-sync-jczhh\" (UID: \"290cb163-68fb-48f9-a3d1-695333c2499b\") " pod="openstack/keystone-db-sync-jczhh"
Dec 10 00:54:35 crc kubenswrapper[4884]: W1210 00:54:35.573655 4884 manager.go:1169] Failed to process
watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5b54dd3_a06f_4c77_88d4_88417c5d43eb.slice/crio-08413f0250c80d41ba260ce78584fa21b92c9daab504dd6b0f1d040201d5a8a5 WatchSource:0}: Error finding container 08413f0250c80d41ba260ce78584fa21b92c9daab504dd6b0f1d040201d5a8a5: Status 404 returned error can't find the container with id 08413f0250c80d41ba260ce78584fa21b92c9daab504dd6b0f1d040201d5a8a5
Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.655189 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-jczhh"
Dec 10 00:54:35 crc kubenswrapper[4884]: I1210 00:54:35.756251 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-8xwrt"]
Dec 10 00:54:36 crc kubenswrapper[4884]: I1210 00:54:36.105022 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-rx8lw"]
Dec 10 00:54:36 crc kubenswrapper[4884]: I1210 00:54:36.190779 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-6db2-account-create-update-m9tlk"]
Dec 10 00:54:36 crc kubenswrapper[4884]: I1210 00:54:36.286755 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8xwrt" event={"ID":"8af1f893-43a8-4ab0-8b44-4f2dd24340b0","Type":"ContainerStarted","Data":"59317daa5204d8b98fe1ccefcd0a63544ec6337094f5cf50335537eb2b4eb1e7"}
Dec 10 00:54:36 crc kubenswrapper[4884]: I1210 00:54:36.290216 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9879cc81-0cad-4e77-90e5-46afd9adb241","Type":"ContainerStarted","Data":"743fff30662d1cc120603a18720dbc4f5a42f120a1597fa7f06cf37c4ca5af20"}
Dec 10 00:54:36 crc kubenswrapper[4884]: I1210 00:54:36.294482 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-xwwl8" event={"ID":"f5b54dd3-a06f-4c77-88d4-88417c5d43eb","Type":"ContainerStarted","Data":"08413f0250c80d41ba260ce78584fa21b92c9daab504dd6b0f1d040201d5a8a5"}
Dec 10 00:54:36 crc kubenswrapper[4884]: I1210 00:54:36.342994 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7788-account-create-update-zwq2d"]
Dec 10 00:54:36 crc kubenswrapper[4884]: W1210 00:54:36.622479 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb0386097_ea50_4085_a9f0_7fcde7163b25.slice/crio-9c4ca18c93d19ec5765b4e076da2dc823fa73981aae46ba746ed6e11c301ee2f WatchSource:0}: Error finding container 9c4ca18c93d19ec5765b4e076da2dc823fa73981aae46ba746ed6e11c301ee2f: Status 404 returned error can't find the container with id 9c4ca18c93d19ec5765b4e076da2dc823fa73981aae46ba746ed6e11c301ee2f
Dec 10 00:54:37 crc kubenswrapper[4884]: I1210 00:54:37.315771 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7788-account-create-update-zwq2d" event={"ID":"b0386097-ea50-4085-a9f0-7fcde7163b25","Type":"ContainerStarted","Data":"9c4ca18c93d19ec5765b4e076da2dc823fa73981aae46ba746ed6e11c301ee2f"}
Dec 10 00:54:37 crc kubenswrapper[4884]: I1210 00:54:37.324335 4884 generic.go:334] "Generic (PLEG): container finished" podID="9ee033e3-bd08-4fe7-8efb-d6b81e79796c" containerID="afd5fed80eeac90680b8eeb4e52d5c23f1108fce92b8d26143606b98ef33a5fd" exitCode=0
Dec 10 00:54:37 crc kubenswrapper[4884]: I1210 00:54:37.324399 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-pm6qf"
event={"ID":"9ee033e3-bd08-4fe7-8efb-d6b81e79796c","Type":"ContainerDied","Data":"afd5fed80eeac90680b8eeb4e52d5c23f1108fce92b8d26143606b98ef33a5fd"} Dec 10 00:54:37 crc kubenswrapper[4884]: I1210 00:54:37.341930 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6db2-account-create-update-m9tlk" event={"ID":"c2c2f4f5-5345-4503-be32-860ebcf42d21","Type":"ContainerStarted","Data":"8327cbc549c294df51c082f4505608cda97cb0a5a3c7ec575a4072f7507b820b"} Dec 10 00:54:37 crc kubenswrapper[4884]: I1210 00:54:37.348080 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-rx8lw" event={"ID":"2e1759e5-b6a9-4815-b344-bc94fb621f14","Type":"ContainerStarted","Data":"13d525b571eee0541ba40838e29d529832b8dd181adf08063cdbba590de3293e"} Dec 10 00:54:37 crc kubenswrapper[4884]: I1210 00:54:37.355242 4884 generic.go:334] "Generic (PLEG): container finished" podID="67087c97-ae2d-400e-b3a4-455c5eb8f082" containerID="d7ee0373f33acbeee34548dbd59122b7458043112c5bd39c58dbc2638fc893a0" exitCode=0 Dec 10 00:54:37 crc kubenswrapper[4884]: I1210 00:54:37.355288 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4150-account-create-update-pfkjg" event={"ID":"67087c97-ae2d-400e-b3a4-455c5eb8f082","Type":"ContainerDied","Data":"d7ee0373f33acbeee34548dbd59122b7458043112c5bd39c58dbc2638fc893a0"} Dec 10 00:54:38 crc kubenswrapper[4884]: I1210 00:54:38.364532 4884 generic.go:334] "Generic (PLEG): container finished" podID="4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43" containerID="ab1133ce19b4de1af4bbcf288ae900fd93816d8a9cded3eda5f3a35f3c086418" exitCode=0 Dec 10 00:54:38 crc kubenswrapper[4884]: I1210 00:54:38.364596 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-0b32-account-create-update-lb248" event={"ID":"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43","Type":"ContainerDied","Data":"ab1133ce19b4de1af4bbcf288ae900fd93816d8a9cded3eda5f3a35f3c086418"} Dec 10 00:54:38 crc kubenswrapper[4884]: I1210 00:54:38.367654 4884 generic.go:334] "Generic (PLEG): container finished" podID="f5b54dd3-a06f-4c77-88d4-88417c5d43eb" containerID="8488ed6846b9ccd6b1a82f369c7730460c2865ee0ca20b9303defb047555336b" exitCode=0 Dec 10 00:54:38 crc kubenswrapper[4884]: I1210 00:54:38.367730 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-xwwl8" event={"ID":"f5b54dd3-a06f-4c77-88d4-88417c5d43eb","Type":"ContainerDied","Data":"8488ed6846b9ccd6b1a82f369c7730460c2865ee0ca20b9303defb047555336b"} Dec 10 00:54:46 crc kubenswrapper[4884]: E1210 00:54:46.885360 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Dec 10 00:54:46 crc kubenswrapper[4884]: E1210 00:54:46.886155 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gtcgj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-bd4dd_openstack(db50dd06-d67c-468e-88de-6a8fb86bd1bd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Dec 10 00:54:46 crc kubenswrapper[4884]: E1210 00:54:46.887471 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-bd4dd" podUID="db50dd06-d67c-468e-88de-6a8fb86bd1bd"
Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.219217 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-xwwl8"
Dec 10 00:54:47 crc kubenswrapper[4884]: W1210 00:54:47.354945 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod290cb163_68fb_48f9_a3d1_695333c2499b.slice/crio-6a030fdbd76b315415a6290fd423f9ec1a35d5a6ec8d32394eda4ad89dd1a8c0 WatchSource:0}: Error finding container 6a030fdbd76b315415a6290fd423f9ec1a35d5a6ec8d32394eda4ad89dd1a8c0: Status 404 returned error can't find the container with id 6a030fdbd76b315415a6290fd423f9ec1a35d5a6ec8d32394eda4ad89dd1a8c0
Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.357214 4884 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/heat-0b32-account-create-update-lb248" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.360096 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-jczhh"] Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.392680 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f5b54dd3-a06f-4c77-88d4-88417c5d43eb-operator-scripts\") pod \"f5b54dd3-a06f-4c77-88d4-88417c5d43eb\" (UID: \"f5b54dd3-a06f-4c77-88d4-88417c5d43eb\") " Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.392966 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmxhp\" (UniqueName: \"kubernetes.io/projected/f5b54dd3-a06f-4c77-88d4-88417c5d43eb-kube-api-access-gmxhp\") pod \"f5b54dd3-a06f-4c77-88d4-88417c5d43eb\" (UID: \"f5b54dd3-a06f-4c77-88d4-88417c5d43eb\") " Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.394837 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5b54dd3-a06f-4c77-88d4-88417c5d43eb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f5b54dd3-a06f-4c77-88d4-88417c5d43eb" (UID: "f5b54dd3-a06f-4c77-88d4-88417c5d43eb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.406320 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5b54dd3-a06f-4c77-88d4-88417c5d43eb-kube-api-access-gmxhp" (OuterVolumeSpecName: "kube-api-access-gmxhp") pod "f5b54dd3-a06f-4c77-88d4-88417c5d43eb" (UID: "f5b54dd3-a06f-4c77-88d4-88417c5d43eb"). InnerVolumeSpecName "kube-api-access-gmxhp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.414794 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4150-account-create-update-pfkjg" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.441775 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-pm6qf" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.494104 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43-operator-scripts\") pod \"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43\" (UID: \"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43\") " Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.494341 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-47z54\" (UniqueName: \"kubernetes.io/projected/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43-kube-api-access-47z54\") pod \"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43\" (UID: \"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43\") " Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.494896 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43" (UID: "4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.495530 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmxhp\" (UniqueName: \"kubernetes.io/projected/f5b54dd3-a06f-4c77-88d4-88417c5d43eb-kube-api-access-gmxhp\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.495551 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f5b54dd3-a06f-4c77-88d4-88417c5d43eb-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.495561 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.499445 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43-kube-api-access-47z54" (OuterVolumeSpecName: "kube-api-access-47z54") pod "4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43" (UID: "4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43"). InnerVolumeSpecName "kube-api-access-47z54". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.596775 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6n2r\" (UniqueName: \"kubernetes.io/projected/9ee033e3-bd08-4fe7-8efb-d6b81e79796c-kube-api-access-l6n2r\") pod \"9ee033e3-bd08-4fe7-8efb-d6b81e79796c\" (UID: \"9ee033e3-bd08-4fe7-8efb-d6b81e79796c\") " Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.596827 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67087c97-ae2d-400e-b3a4-455c5eb8f082-operator-scripts\") pod \"67087c97-ae2d-400e-b3a4-455c5eb8f082\" (UID: \"67087c97-ae2d-400e-b3a4-455c5eb8f082\") " Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.596870 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ee033e3-bd08-4fe7-8efb-d6b81e79796c-operator-scripts\") pod \"9ee033e3-bd08-4fe7-8efb-d6b81e79796c\" (UID: \"9ee033e3-bd08-4fe7-8efb-d6b81e79796c\") " Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.596973 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvsmf\" (UniqueName: \"kubernetes.io/projected/67087c97-ae2d-400e-b3a4-455c5eb8f082-kube-api-access-pvsmf\") pod \"67087c97-ae2d-400e-b3a4-455c5eb8f082\" (UID: \"67087c97-ae2d-400e-b3a4-455c5eb8f082\") " Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.597624 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-47z54\" (UniqueName: \"kubernetes.io/projected/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43-kube-api-access-47z54\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.598058 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67087c97-ae2d-400e-b3a4-455c5eb8f082-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "67087c97-ae2d-400e-b3a4-455c5eb8f082" (UID: "67087c97-ae2d-400e-b3a4-455c5eb8f082"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.598072 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ee033e3-bd08-4fe7-8efb-d6b81e79796c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9ee033e3-bd08-4fe7-8efb-d6b81e79796c" (UID: "9ee033e3-bd08-4fe7-8efb-d6b81e79796c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.601538 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ee033e3-bd08-4fe7-8efb-d6b81e79796c-kube-api-access-l6n2r" (OuterVolumeSpecName: "kube-api-access-l6n2r") pod "9ee033e3-bd08-4fe7-8efb-d6b81e79796c" (UID: "9ee033e3-bd08-4fe7-8efb-d6b81e79796c"). InnerVolumeSpecName "kube-api-access-l6n2r". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.602597 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67087c97-ae2d-400e-b3a4-455c5eb8f082-kube-api-access-pvsmf" (OuterVolumeSpecName: "kube-api-access-pvsmf") pod "67087c97-ae2d-400e-b3a4-455c5eb8f082" (UID: "67087c97-ae2d-400e-b3a4-455c5eb8f082"). InnerVolumeSpecName "kube-api-access-pvsmf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.621032 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8xwrt" event={"ID":"8af1f893-43a8-4ab0-8b44-4f2dd24340b0","Type":"ContainerStarted","Data":"c937161b5d60dc9246e70fbc3f926ace9c34dd782303419af234f313a8bce6b8"} Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.625532 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"9879cc81-0cad-4e77-90e5-46afd9adb241","Type":"ContainerStarted","Data":"defedb22a0084341c42757a016f446dbb4a6de7dd4fa4bd1f90290a58ffde6dc"} Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.627742 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-0b32-account-create-update-lb248" event={"ID":"4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43","Type":"ContainerDied","Data":"cbe999af5c13574893266afeb7b7099e5a1b5531d84cefca50e767c2001a5c20"} Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.627768 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cbe999af5c13574893266afeb7b7099e5a1b5531d84cefca50e767c2001a5c20" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.627805 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-0b32-account-create-update-lb248" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.634114 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7788-account-create-update-zwq2d" event={"ID":"b0386097-ea50-4085-a9f0-7fcde7163b25","Type":"ContainerStarted","Data":"fecadb8cf74ec042d376f671ca89fefddb08b082c7faa83c14319116ddff7839"} Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.635470 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-8xwrt" podStartSLOduration=14.635450524 podStartE2EDuration="14.635450524s" podCreationTimestamp="2025-12-10 00:54:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:54:47.634883639 +0000 UTC m=+1460.712840766" watchObservedRunningTime="2025-12-10 00:54:47.635450524 +0000 UTC m=+1460.713407641" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.636779 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-xwwl8" event={"ID":"f5b54dd3-a06f-4c77-88d4-88417c5d43eb","Type":"ContainerDied","Data":"08413f0250c80d41ba260ce78584fa21b92c9daab504dd6b0f1d040201d5a8a5"} Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.636810 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="08413f0250c80d41ba260ce78584fa21b92c9daab504dd6b0f1d040201d5a8a5" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.636866 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-xwwl8" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.640993 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-jczhh" event={"ID":"290cb163-68fb-48f9-a3d1-695333c2499b","Type":"ContainerStarted","Data":"6a030fdbd76b315415a6290fd423f9ec1a35d5a6ec8d32394eda4ad89dd1a8c0"} Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.670660 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"965b1df644aed10da5852bfbbde7732e4ba42e5591173c93691814a7afb1b179"} Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.672119 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4","Type":"ContainerStarted","Data":"10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca"} Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.673785 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-pm6qf" event={"ID":"9ee033e3-bd08-4fe7-8efb-d6b81e79796c","Type":"ContainerDied","Data":"89086adadcab7ece8586ba72603970c224ca35ee59d5c3d45477ec11daaa4631"} Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.673854 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="89086adadcab7ece8586ba72603970c224ca35ee59d5c3d45477ec11daaa4631" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.673988 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-pm6qf" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.675329 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6db2-account-create-update-m9tlk" event={"ID":"c2c2f4f5-5345-4503-be32-860ebcf42d21","Type":"ContainerStarted","Data":"6c7978f28cd94d2d6706df19963d9d6859d1bf52b2cad035e9b400dcdc3c853f"} Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.679791 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-rx8lw" event={"ID":"2e1759e5-b6a9-4815-b344-bc94fb621f14","Type":"ContainerStarted","Data":"fca5fc589fbc2be3381c5c140f587adc8b9fb7a3c0a6f44d700fc828d97a39bc"} Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.695398 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-4150-account-create-update-pfkjg" event={"ID":"67087c97-ae2d-400e-b3a4-455c5eb8f082","Type":"ContainerDied","Data":"742fbab21225505849724bdc6a3b4cf5c1cd67e0d4ee7548eaa334aea9fd8a25"} Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.695456 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="742fbab21225505849724bdc6a3b4cf5c1cd67e0d4ee7548eaa334aea9fd8a25" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.695558 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-4150-account-create-update-pfkjg" Dec 10 00:54:47 crc kubenswrapper[4884]: E1210 00:54:47.699067 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-bd4dd" podUID="db50dd06-d67c-468e-88de-6a8fb86bd1bd" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.700647 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=25.700632949 podStartE2EDuration="25.700632949s" podCreationTimestamp="2025-12-10 00:54:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:54:47.660180724 +0000 UTC m=+1460.738137851" watchObservedRunningTime="2025-12-10 00:54:47.700632949 +0000 UTC m=+1460.778590056" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.705799 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6n2r\" (UniqueName: \"kubernetes.io/projected/9ee033e3-bd08-4fe7-8efb-d6b81e79796c-kube-api-access-l6n2r\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.705832 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/67087c97-ae2d-400e-b3a4-455c5eb8f082-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.705841 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ee033e3-bd08-4fe7-8efb-d6b81e79796c-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.705850 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvsmf\" (UniqueName: \"kubernetes.io/projected/67087c97-ae2d-400e-b3a4-455c5eb8f082-kube-api-access-pvsmf\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:47 crc kubenswrapper[4884]: 
I1210 00:54:47.709408 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7788-account-create-update-zwq2d" podStartSLOduration=13.709393516 podStartE2EDuration="13.709393516s" podCreationTimestamp="2025-12-10 00:54:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:54:47.678637533 +0000 UTC m=+1460.756594670" watchObservedRunningTime="2025-12-10 00:54:47.709393516 +0000 UTC m=+1460.787350633" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.748441 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-6db2-account-create-update-m9tlk" podStartSLOduration=14.748412283 podStartE2EDuration="14.748412283s" podCreationTimestamp="2025-12-10 00:54:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:54:47.700506136 +0000 UTC m=+1460.778463253" watchObservedRunningTime="2025-12-10 00:54:47.748412283 +0000 UTC m=+1460.826369400" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.754336 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.686787625 podStartE2EDuration="15.754318493s" podCreationTimestamp="2025-12-10 00:54:32 +0000 UTC" firstStartedPulling="2025-12-10 00:54:33.768297701 +0000 UTC m=+1446.846254818" lastFinishedPulling="2025-12-10 00:54:46.835828579 +0000 UTC m=+1459.913785686" observedRunningTime="2025-12-10 00:54:47.725524673 +0000 UTC m=+1460.803481790" watchObservedRunningTime="2025-12-10 00:54:47.754318493 +0000 UTC m=+1460.832275610" Dec 10 00:54:47 crc kubenswrapper[4884]: I1210 00:54:47.785583 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-rx8lw" podStartSLOduration=14.785567689 podStartE2EDuration="14.785567689s" podCreationTimestamp="2025-12-10 00:54:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:54:47.785321382 +0000 UTC m=+1460.863278509" watchObservedRunningTime="2025-12-10 00:54:47.785567689 +0000 UTC m=+1460.863524806" Dec 10 00:54:48 crc kubenswrapper[4884]: I1210 00:54:48.709637 4884 generic.go:334] "Generic (PLEG): container finished" podID="b0386097-ea50-4085-a9f0-7fcde7163b25" containerID="fecadb8cf74ec042d376f671ca89fefddb08b082c7faa83c14319116ddff7839" exitCode=0 Dec 10 00:54:48 crc kubenswrapper[4884]: I1210 00:54:48.709910 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7788-account-create-update-zwq2d" event={"ID":"b0386097-ea50-4085-a9f0-7fcde7163b25","Type":"ContainerDied","Data":"fecadb8cf74ec042d376f671ca89fefddb08b082c7faa83c14319116ddff7839"} Dec 10 00:54:48 crc kubenswrapper[4884]: I1210 00:54:48.717712 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"4f0fe29f67269b1dbf1f0cda388b88d5ed07c47acd1e097ce068f0bdfab13e78"} Dec 10 00:54:48 crc kubenswrapper[4884]: I1210 00:54:48.717753 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"408cce967ffb22e57a0feb630233b5d1e14b703a71ecd35da85390ea786180b9"} Dec 10 00:54:48 crc kubenswrapper[4884]: I1210 
00:54:48.717764 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"87d13ebf249603b3f8dd82d7baea38c0e98cdcbec3f7790b580702493a524904"} Dec 10 00:54:48 crc kubenswrapper[4884]: I1210 00:54:48.725620 4884 generic.go:334] "Generic (PLEG): container finished" podID="8af1f893-43a8-4ab0-8b44-4f2dd24340b0" containerID="c937161b5d60dc9246e70fbc3f926ace9c34dd782303419af234f313a8bce6b8" exitCode=0 Dec 10 00:54:48 crc kubenswrapper[4884]: I1210 00:54:48.725724 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8xwrt" event={"ID":"8af1f893-43a8-4ab0-8b44-4f2dd24340b0","Type":"ContainerDied","Data":"c937161b5d60dc9246e70fbc3f926ace9c34dd782303419af234f313a8bce6b8"} Dec 10 00:54:48 crc kubenswrapper[4884]: I1210 00:54:48.728206 4884 generic.go:334] "Generic (PLEG): container finished" podID="c2c2f4f5-5345-4503-be32-860ebcf42d21" containerID="6c7978f28cd94d2d6706df19963d9d6859d1bf52b2cad035e9b400dcdc3c853f" exitCode=0 Dec 10 00:54:48 crc kubenswrapper[4884]: I1210 00:54:48.728246 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6db2-account-create-update-m9tlk" event={"ID":"c2c2f4f5-5345-4503-be32-860ebcf42d21","Type":"ContainerDied","Data":"6c7978f28cd94d2d6706df19963d9d6859d1bf52b2cad035e9b400dcdc3c853f"} Dec 10 00:54:48 crc kubenswrapper[4884]: I1210 00:54:48.729878 4884 generic.go:334] "Generic (PLEG): container finished" podID="2e1759e5-b6a9-4815-b344-bc94fb621f14" containerID="fca5fc589fbc2be3381c5c140f587adc8b9fb7a3c0a6f44d700fc828d97a39bc" exitCode=0 Dec 10 00:54:48 crc kubenswrapper[4884]: I1210 00:54:48.730914 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-rx8lw" event={"ID":"2e1759e5-b6a9-4815-b344-bc94fb621f14","Type":"ContainerDied","Data":"fca5fc589fbc2be3381c5c140f587adc8b9fb7a3c0a6f44d700fc828d97a39bc"} Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.491614 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.491988 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.498286 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.565877 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7788-account-create-update-zwq2d" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.600313 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-rx8lw" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.635629 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-6db2-account-create-update-m9tlk" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.640756 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-8xwrt" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.709270 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2c2f4f5-5345-4503-be32-860ebcf42d21-operator-scripts\") pod \"c2c2f4f5-5345-4503-be32-860ebcf42d21\" (UID: \"c2c2f4f5-5345-4503-be32-860ebcf42d21\") " Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.709353 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkbpx\" (UniqueName: \"kubernetes.io/projected/2e1759e5-b6a9-4815-b344-bc94fb621f14-kube-api-access-xkbpx\") pod \"2e1759e5-b6a9-4815-b344-bc94fb621f14\" (UID: \"2e1759e5-b6a9-4815-b344-bc94fb621f14\") " Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.709490 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdzt5\" (UniqueName: \"kubernetes.io/projected/b0386097-ea50-4085-a9f0-7fcde7163b25-kube-api-access-hdzt5\") pod \"b0386097-ea50-4085-a9f0-7fcde7163b25\" (UID: \"b0386097-ea50-4085-a9f0-7fcde7163b25\") " Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.709586 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9v99h\" (UniqueName: \"kubernetes.io/projected/c2c2f4f5-5345-4503-be32-860ebcf42d21-kube-api-access-9v99h\") pod \"c2c2f4f5-5345-4503-be32-860ebcf42d21\" (UID: \"c2c2f4f5-5345-4503-be32-860ebcf42d21\") " Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.709696 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e1759e5-b6a9-4815-b344-bc94fb621f14-operator-scripts\") pod \"2e1759e5-b6a9-4815-b344-bc94fb621f14\" (UID: \"2e1759e5-b6a9-4815-b344-bc94fb621f14\") " Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.709747 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0386097-ea50-4085-a9f0-7fcde7163b25-operator-scripts\") pod \"b0386097-ea50-4085-a9f0-7fcde7163b25\" (UID: \"b0386097-ea50-4085-a9f0-7fcde7163b25\") " Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.709817 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c2c2f4f5-5345-4503-be32-860ebcf42d21-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c2c2f4f5-5345-4503-be32-860ebcf42d21" (UID: "c2c2f4f5-5345-4503-be32-860ebcf42d21"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.710266 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c2c2f4f5-5345-4503-be32-860ebcf42d21-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.710642 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e1759e5-b6a9-4815-b344-bc94fb621f14-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2e1759e5-b6a9-4815-b344-bc94fb621f14" (UID: "2e1759e5-b6a9-4815-b344-bc94fb621f14"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.710757 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0386097-ea50-4085-a9f0-7fcde7163b25-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b0386097-ea50-4085-a9f0-7fcde7163b25" (UID: "b0386097-ea50-4085-a9f0-7fcde7163b25"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.715003 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0386097-ea50-4085-a9f0-7fcde7163b25-kube-api-access-hdzt5" (OuterVolumeSpecName: "kube-api-access-hdzt5") pod "b0386097-ea50-4085-a9f0-7fcde7163b25" (UID: "b0386097-ea50-4085-a9f0-7fcde7163b25"). InnerVolumeSpecName "kube-api-access-hdzt5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.715082 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e1759e5-b6a9-4815-b344-bc94fb621f14-kube-api-access-xkbpx" (OuterVolumeSpecName: "kube-api-access-xkbpx") pod "2e1759e5-b6a9-4815-b344-bc94fb621f14" (UID: "2e1759e5-b6a9-4815-b344-bc94fb621f14"). InnerVolumeSpecName "kube-api-access-xkbpx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.720611 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2c2f4f5-5345-4503-be32-860ebcf42d21-kube-api-access-9v99h" (OuterVolumeSpecName: "kube-api-access-9v99h") pod "c2c2f4f5-5345-4503-be32-860ebcf42d21" (UID: "c2c2f4f5-5345-4503-be32-860ebcf42d21"). InnerVolumeSpecName "kube-api-access-9v99h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.783553 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-8xwrt" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.783564 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-8xwrt" event={"ID":"8af1f893-43a8-4ab0-8b44-4f2dd24340b0","Type":"ContainerDied","Data":"59317daa5204d8b98fe1ccefcd0a63544ec6337094f5cf50335537eb2b4eb1e7"} Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.783597 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="59317daa5204d8b98fe1ccefcd0a63544ec6337094f5cf50335537eb2b4eb1e7" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.785401 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-6db2-account-create-update-m9tlk" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.785776 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-6db2-account-create-update-m9tlk" event={"ID":"c2c2f4f5-5345-4503-be32-860ebcf42d21","Type":"ContainerDied","Data":"8327cbc549c294df51c082f4505608cda97cb0a5a3c7ec575a4072f7507b820b"} Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.785818 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8327cbc549c294df51c082f4505608cda97cb0a5a3c7ec575a4072f7507b820b" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.787776 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-rx8lw" event={"ID":"2e1759e5-b6a9-4815-b344-bc94fb621f14","Type":"ContainerDied","Data":"13d525b571eee0541ba40838e29d529832b8dd181adf08063cdbba590de3293e"} Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.787820 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13d525b571eee0541ba40838e29d529832b8dd181adf08063cdbba590de3293e" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.787854 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-rx8lw" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.790823 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7788-account-create-update-zwq2d" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.790818 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7788-account-create-update-zwq2d" event={"ID":"b0386097-ea50-4085-a9f0-7fcde7163b25","Type":"ContainerDied","Data":"9c4ca18c93d19ec5765b4e076da2dc823fa73981aae46ba746ed6e11c301ee2f"} Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.791043 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c4ca18c93d19ec5765b4e076da2dc823fa73981aae46ba746ed6e11c301ee2f" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.792563 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-jczhh" event={"ID":"290cb163-68fb-48f9-a3d1-695333c2499b","Type":"ContainerStarted","Data":"2c724810b32bcf2a1149726dff4e07751a96cb1a476b47900a738c030fa5eeb1"} Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.821601 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjwcn\" (UniqueName: \"kubernetes.io/projected/8af1f893-43a8-4ab0-8b44-4f2dd24340b0-kube-api-access-sjwcn\") pod \"8af1f893-43a8-4ab0-8b44-4f2dd24340b0\" (UID: \"8af1f893-43a8-4ab0-8b44-4f2dd24340b0\") " Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.821866 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8af1f893-43a8-4ab0-8b44-4f2dd24340b0-operator-scripts\") pod \"8af1f893-43a8-4ab0-8b44-4f2dd24340b0\" (UID: \"8af1f893-43a8-4ab0-8b44-4f2dd24340b0\") " Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.822317 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"622fe9bea61185eef4481d75a1a0a5f71cb5c68376af3ef5b11831a62a9956a2"} Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.823351 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/configmap/8af1f893-43a8-4ab0-8b44-4f2dd24340b0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8af1f893-43a8-4ab0-8b44-4f2dd24340b0" (UID: "8af1f893-43a8-4ab0-8b44-4f2dd24340b0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.826491 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8af1f893-43a8-4ab0-8b44-4f2dd24340b0-kube-api-access-sjwcn" (OuterVolumeSpecName: "kube-api-access-sjwcn") pod "8af1f893-43a8-4ab0-8b44-4f2dd24340b0" (UID: "8af1f893-43a8-4ab0-8b44-4f2dd24340b0"). InnerVolumeSpecName "kube-api-access-sjwcn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.829292 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.839125 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9v99h\" (UniqueName: \"kubernetes.io/projected/c2c2f4f5-5345-4503-be32-860ebcf42d21-kube-api-access-9v99h\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.839173 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8af1f893-43a8-4ab0-8b44-4f2dd24340b0-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.839188 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e1759e5-b6a9-4815-b344-bc94fb621f14-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.839201 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b0386097-ea50-4085-a9f0-7fcde7163b25-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.839213 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjwcn\" (UniqueName: \"kubernetes.io/projected/8af1f893-43a8-4ab0-8b44-4f2dd24340b0-kube-api-access-sjwcn\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.839230 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkbpx\" (UniqueName: \"kubernetes.io/projected/2e1759e5-b6a9-4815-b344-bc94fb621f14-kube-api-access-xkbpx\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.839241 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdzt5\" (UniqueName: \"kubernetes.io/projected/b0386097-ea50-4085-a9f0-7fcde7163b25-kube-api-access-hdzt5\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:52 crc kubenswrapper[4884]: I1210 00:54:52.846408 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-jczhh" podStartSLOduration=13.760015488 podStartE2EDuration="18.841136308s" podCreationTimestamp="2025-12-10 00:54:34 +0000 UTC" firstStartedPulling="2025-12-10 00:54:47.357574959 +0000 UTC m=+1460.435532076" lastFinishedPulling="2025-12-10 00:54:52.438695779 +0000 UTC m=+1465.516652896" observedRunningTime="2025-12-10 00:54:52.820162759 +0000 UTC m=+1465.898119876" watchObservedRunningTime="2025-12-10 00:54:52.841136308 +0000 UTC m=+1465.919093425" Dec 10 00:54:53 crc 
kubenswrapper[4884]: I1210 00:54:53.832582 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"e8a7b09484d0d275eb0df833b212adef247dc4161c7680780176b9f22e523425"} Dec 10 00:54:53 crc kubenswrapper[4884]: I1210 00:54:53.832875 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"317d8936d6551d545577154cf457aeb9c7a11d670fc649f75f3c0f5b9eb05e27"} Dec 10 00:54:53 crc kubenswrapper[4884]: I1210 00:54:53.832887 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"bd9331df832f9ceda1eb0df49a8e41b3862b800596ea9048d9934b66274bdc82"} Dec 10 00:54:53 crc kubenswrapper[4884]: I1210 00:54:53.832897 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"d9965453c6739e91412b8a1c40cfc3f427d0c6fdc909db816475432487e4dde0"} Dec 10 00:54:54 crc kubenswrapper[4884]: I1210 00:54:54.849371 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"c5e9b3039013b1a0cb798cb99227a5432a653ff96fb5fc398591098dc9a74bb6"} Dec 10 00:54:54 crc kubenswrapper[4884]: I1210 00:54:54.849777 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"77e9a322-ea03-4101-b8be-d1e09f67e8c2","Type":"ContainerStarted","Data":"b5152a1c10a15937e02f8c84a7ba7ccf619f2c2bbc3c2ec6bc369a75f6dbd12c"} Dec 10 00:54:54 crc kubenswrapper[4884]: I1210 00:54:54.886601 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=36.24465783 podStartE2EDuration="57.88657773s" podCreationTimestamp="2025-12-10 00:53:57 +0000 UTC" firstStartedPulling="2025-12-10 00:54:30.763602981 +0000 UTC m=+1443.841560098" lastFinishedPulling="2025-12-10 00:54:52.405522871 +0000 UTC m=+1465.483479998" observedRunningTime="2025-12-10 00:54:54.882989853 +0000 UTC m=+1467.960946990" watchObservedRunningTime="2025-12-10 00:54:54.88657773 +0000 UTC m=+1467.964534867" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.187877 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-8zdxj"] Dec 10 00:54:55 crc kubenswrapper[4884]: E1210 00:54:55.188336 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2c2f4f5-5345-4503-be32-860ebcf42d21" containerName="mariadb-account-create-update" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188355 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2c2f4f5-5345-4503-be32-860ebcf42d21" containerName="mariadb-account-create-update" Dec 10 00:54:55 crc kubenswrapper[4884]: E1210 00:54:55.188367 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43" containerName="mariadb-account-create-update" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188375 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43" containerName="mariadb-account-create-update" Dec 10 00:54:55 crc kubenswrapper[4884]: E1210 00:54:55.188388 4884 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b0386097-ea50-4085-a9f0-7fcde7163b25" containerName="mariadb-account-create-update" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188396 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0386097-ea50-4085-a9f0-7fcde7163b25" containerName="mariadb-account-create-update" Dec 10 00:54:55 crc kubenswrapper[4884]: E1210 00:54:55.188415 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67087c97-ae2d-400e-b3a4-455c5eb8f082" containerName="mariadb-account-create-update" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188422 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="67087c97-ae2d-400e-b3a4-455c5eb8f082" containerName="mariadb-account-create-update" Dec 10 00:54:55 crc kubenswrapper[4884]: E1210 00:54:55.188458 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8af1f893-43a8-4ab0-8b44-4f2dd24340b0" containerName="mariadb-database-create" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188468 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8af1f893-43a8-4ab0-8b44-4f2dd24340b0" containerName="mariadb-database-create" Dec 10 00:54:55 crc kubenswrapper[4884]: E1210 00:54:55.188497 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ee033e3-bd08-4fe7-8efb-d6b81e79796c" containerName="mariadb-database-create" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188505 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ee033e3-bd08-4fe7-8efb-d6b81e79796c" containerName="mariadb-database-create" Dec 10 00:54:55 crc kubenswrapper[4884]: E1210 00:54:55.188517 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5b54dd3-a06f-4c77-88d4-88417c5d43eb" containerName="mariadb-database-create" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188524 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5b54dd3-a06f-4c77-88d4-88417c5d43eb" containerName="mariadb-database-create" Dec 10 00:54:55 crc kubenswrapper[4884]: E1210 00:54:55.188535 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e1759e5-b6a9-4815-b344-bc94fb621f14" containerName="mariadb-database-create" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188542 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e1759e5-b6a9-4815-b344-bc94fb621f14" containerName="mariadb-database-create" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188767 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0386097-ea50-4085-a9f0-7fcde7163b25" containerName="mariadb-account-create-update" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188780 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ee033e3-bd08-4fe7-8efb-d6b81e79796c" containerName="mariadb-database-create" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188791 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43" containerName="mariadb-account-create-update" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188804 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8af1f893-43a8-4ab0-8b44-4f2dd24340b0" containerName="mariadb-database-create" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188815 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e1759e5-b6a9-4815-b344-bc94fb621f14" containerName="mariadb-database-create" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188831 4884 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="f5b54dd3-a06f-4c77-88d4-88417c5d43eb" containerName="mariadb-database-create" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188839 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2c2f4f5-5345-4503-be32-860ebcf42d21" containerName="mariadb-account-create-update" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.188850 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="67087c97-ae2d-400e-b3a4-455c5eb8f082" containerName="mariadb-account-create-update" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.189977 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.192446 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.205065 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-8zdxj"] Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.282227 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jf9j7\" (UniqueName: \"kubernetes.io/projected/e26b622d-9a7a-4af6-9294-bde8eeaa5584-kube-api-access-jf9j7\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.282289 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.282315 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.282332 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.282476 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-config\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.282504 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 
00:54:55.384659 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-config\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.384728 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.384823 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jf9j7\" (UniqueName: \"kubernetes.io/projected/e26b622d-9a7a-4af6-9294-bde8eeaa5584-kube-api-access-jf9j7\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.384919 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.384949 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.384973 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.385843 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-config\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.385876 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.386139 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.386201 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.386851 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.405321 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jf9j7\" (UniqueName: \"kubernetes.io/projected/e26b622d-9a7a-4af6-9294-bde8eeaa5584-kube-api-access-jf9j7\") pod \"dnsmasq-dns-5c79d794d7-8zdxj\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.510662 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.861789 4884 generic.go:334] "Generic (PLEG): container finished" podID="290cb163-68fb-48f9-a3d1-695333c2499b" containerID="2c724810b32bcf2a1149726dff4e07751a96cb1a476b47900a738c030fa5eeb1" exitCode=0 Dec 10 00:54:55 crc kubenswrapper[4884]: I1210 00:54:55.861858 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-jczhh" event={"ID":"290cb163-68fb-48f9-a3d1-695333c2499b","Type":"ContainerDied","Data":"2c724810b32bcf2a1149726dff4e07751a96cb1a476b47900a738c030fa5eeb1"} Dec 10 00:54:56 crc kubenswrapper[4884]: I1210 00:54:56.012037 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-8zdxj"] Dec 10 00:54:56 crc kubenswrapper[4884]: I1210 00:54:56.878164 4884 generic.go:334] "Generic (PLEG): container finished" podID="e26b622d-9a7a-4af6-9294-bde8eeaa5584" containerID="d44c2dd9e78fe55d8ac6a3fe2109feb54a6fff207a8af188217309a65f5d4b20" exitCode=0 Dec 10 00:54:56 crc kubenswrapper[4884]: I1210 00:54:56.879421 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" event={"ID":"e26b622d-9a7a-4af6-9294-bde8eeaa5584","Type":"ContainerDied","Data":"d44c2dd9e78fe55d8ac6a3fe2109feb54a6fff207a8af188217309a65f5d4b20"} Dec 10 00:54:56 crc kubenswrapper[4884]: I1210 00:54:56.879498 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" event={"ID":"e26b622d-9a7a-4af6-9294-bde8eeaa5584","Type":"ContainerStarted","Data":"ae72b0fc7d162c57a0e0c537eb478d9c21f1f990fc4053a1a3eed806999977fe"} Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.219401 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-jczhh" Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.320508 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/290cb163-68fb-48f9-a3d1-695333c2499b-combined-ca-bundle\") pod \"290cb163-68fb-48f9-a3d1-695333c2499b\" (UID: \"290cb163-68fb-48f9-a3d1-695333c2499b\") " Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.320719 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/290cb163-68fb-48f9-a3d1-695333c2499b-config-data\") pod \"290cb163-68fb-48f9-a3d1-695333c2499b\" (UID: \"290cb163-68fb-48f9-a3d1-695333c2499b\") " Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.321280 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctcqk\" (UniqueName: \"kubernetes.io/projected/290cb163-68fb-48f9-a3d1-695333c2499b-kube-api-access-ctcqk\") pod \"290cb163-68fb-48f9-a3d1-695333c2499b\" (UID: \"290cb163-68fb-48f9-a3d1-695333c2499b\") " Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.326996 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/290cb163-68fb-48f9-a3d1-695333c2499b-kube-api-access-ctcqk" (OuterVolumeSpecName: "kube-api-access-ctcqk") pod "290cb163-68fb-48f9-a3d1-695333c2499b" (UID: "290cb163-68fb-48f9-a3d1-695333c2499b"). InnerVolumeSpecName "kube-api-access-ctcqk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.357153 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/290cb163-68fb-48f9-a3d1-695333c2499b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "290cb163-68fb-48f9-a3d1-695333c2499b" (UID: "290cb163-68fb-48f9-a3d1-695333c2499b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.382759 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/290cb163-68fb-48f9-a3d1-695333c2499b-config-data" (OuterVolumeSpecName: "config-data") pod "290cb163-68fb-48f9-a3d1-695333c2499b" (UID: "290cb163-68fb-48f9-a3d1-695333c2499b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.427057 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/290cb163-68fb-48f9-a3d1-695333c2499b-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.427116 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctcqk\" (UniqueName: \"kubernetes.io/projected/290cb163-68fb-48f9-a3d1-695333c2499b-kube-api-access-ctcqk\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.427135 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/290cb163-68fb-48f9-a3d1-695333c2499b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.895367 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" event={"ID":"e26b622d-9a7a-4af6-9294-bde8eeaa5584","Type":"ContainerStarted","Data":"3c059e51d118df25118afc9455ae380a51d4874970f5e513b59d6eb1d77bf03d"} Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.895896 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.898710 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-jczhh" event={"ID":"290cb163-68fb-48f9-a3d1-695333c2499b","Type":"ContainerDied","Data":"6a030fdbd76b315415a6290fd423f9ec1a35d5a6ec8d32394eda4ad89dd1a8c0"} Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.898771 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a030fdbd76b315415a6290fd423f9ec1a35d5a6ec8d32394eda4ad89dd1a8c0" Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.898788 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-jczhh" Dec 10 00:54:57 crc kubenswrapper[4884]: I1210 00:54:57.953089 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" podStartSLOduration=2.953068833 podStartE2EDuration="2.953068833s" podCreationTimestamp="2025-12-10 00:54:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:54:57.931594711 +0000 UTC m=+1471.009551918" watchObservedRunningTime="2025-12-10 00:54:57.953068833 +0000 UTC m=+1471.031025960" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.177988 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-7wr27"] Dec 10 00:54:58 crc kubenswrapper[4884]: E1210 00:54:58.178485 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="290cb163-68fb-48f9-a3d1-695333c2499b" containerName="keystone-db-sync" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.178504 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="290cb163-68fb-48f9-a3d1-695333c2499b" containerName="keystone-db-sync" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.178681 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="290cb163-68fb-48f9-a3d1-695333c2499b" containerName="keystone-db-sync" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.179605 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.185832 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.186019 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.186138 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.186313 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.186850 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8z4pv" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.198517 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7wr27"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.212354 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-8zdxj"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.244110 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsws7\" (UniqueName: \"kubernetes.io/projected/c0c53614-a7ff-45aa-bb78-b1b76d53201b-kube-api-access-vsws7\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.244160 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-fernet-keys\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.244209 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-config-data\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.244234 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-combined-ca-bundle\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.244284 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-credential-keys\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.244335 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-scripts\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " 
pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.256848 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b868669f-9kghs"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.272047 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.315073 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-9kghs"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.326568 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-slxfj"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.328919 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-slxfj" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.331767 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-9kwc7" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.333117 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.349214 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-slxfj"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.362965 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.364146 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsws7\" (UniqueName: \"kubernetes.io/projected/c0c53614-a7ff-45aa-bb78-b1b76d53201b-kube-api-access-vsws7\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.364378 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-fernet-keys\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.364499 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.364671 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.364744 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-config-data\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.365137 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-combined-ca-bundle\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.365244 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gknrx\" (UniqueName: \"kubernetes.io/projected/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-kube-api-access-gknrx\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.365318 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-config\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.365725 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-dns-svc\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.365839 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-credential-keys\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.365970 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-scripts\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.378191 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-scripts\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.378289 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-combined-ca-bundle\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.378355 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-cljgw"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.378971 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-fernet-keys\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.379314 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-config-data\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.379751 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cljgw" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.380958 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-credential-keys\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.381981 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.382261 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.383475 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsws7\" (UniqueName: \"kubernetes.io/projected/c0c53614-a7ff-45aa-bb78-b1b76d53201b-kube-api-access-vsws7\") pod \"keystone-bootstrap-7wr27\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.387869 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-5zlwb" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.388067 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-cljgw"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.471859 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/397f040a-a433-4049-8b8e-fcd35c003b15-combined-ca-bundle\") pod \"neutron-db-sync-cljgw\" (UID: \"397f040a-a433-4049-8b8e-fcd35c003b15\") " pod="openstack/neutron-db-sync-cljgw" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.471928 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.471979 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.471999 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.472018 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175b7d23-6c78-4a15-9f04-f40b83d3a932-combined-ca-bundle\") pod \"heat-db-sync-slxfj\" (UID: \"175b7d23-6c78-4a15-9f04-f40b83d3a932\") " pod="openstack/heat-db-sync-slxfj" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.472036 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5c2v\" (UniqueName: \"kubernetes.io/projected/175b7d23-6c78-4a15-9f04-f40b83d3a932-kube-api-access-s5c2v\") pod \"heat-db-sync-slxfj\" (UID: \"175b7d23-6c78-4a15-9f04-f40b83d3a932\") " pod="openstack/heat-db-sync-slxfj" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.472061 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/397f040a-a433-4049-8b8e-fcd35c003b15-config\") pod \"neutron-db-sync-cljgw\" (UID: \"397f040a-a433-4049-8b8e-fcd35c003b15\") " pod="openstack/neutron-db-sync-cljgw" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.472126 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gknrx\" (UniqueName: \"kubernetes.io/projected/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-kube-api-access-gknrx\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.472166 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-config\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.472188 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-dns-svc\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.472204 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/175b7d23-6c78-4a15-9f04-f40b83d3a932-config-data\") pod \"heat-db-sync-slxfj\" (UID: \"175b7d23-6c78-4a15-9f04-f40b83d3a932\") " pod="openstack/heat-db-sync-slxfj" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.472252 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrwkm\" (UniqueName: \"kubernetes.io/projected/397f040a-a433-4049-8b8e-fcd35c003b15-kube-api-access-rrwkm\") pod \"neutron-db-sync-cljgw\" (UID: \"397f040a-a433-4049-8b8e-fcd35c003b15\") " pod="openstack/neutron-db-sync-cljgw" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.472991 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.475468 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-config\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.478192 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.478954 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.480114 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-brxjz"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.480670 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-dns-svc\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.482138 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.487209 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-mdl82" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.487485 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.488237 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.504304 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.523881 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-brxjz"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.536228 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gknrx\" (UniqueName: \"kubernetes.io/projected/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-kube-api-access-gknrx\") pod \"dnsmasq-dns-5b868669f-9kghs\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.564242 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-fkf86"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.573511 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-scripts\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.573572 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrwkm\" (UniqueName: \"kubernetes.io/projected/397f040a-a433-4049-8b8e-fcd35c003b15-kube-api-access-rrwkm\") pod \"neutron-db-sync-cljgw\" (UID: \"397f040a-a433-4049-8b8e-fcd35c003b15\") " pod="openstack/neutron-db-sync-cljgw" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.573605 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-db-sync-config-data\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.573646 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/397f040a-a433-4049-8b8e-fcd35c003b15-combined-ca-bundle\") pod \"neutron-db-sync-cljgw\" (UID: \"397f040a-a433-4049-8b8e-fcd35c003b15\") " pod="openstack/neutron-db-sync-cljgw" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.573699 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-config-data\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.573734 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flm5p\" (UniqueName: \"kubernetes.io/projected/5aeda9c4-9011-47b9-8083-f0309ed8a010-kube-api-access-flm5p\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.573772 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-combined-ca-bundle\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 
00:54:58.573797 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175b7d23-6c78-4a15-9f04-f40b83d3a932-combined-ca-bundle\") pod \"heat-db-sync-slxfj\" (UID: \"175b7d23-6c78-4a15-9f04-f40b83d3a932\") " pod="openstack/heat-db-sync-slxfj" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.573815 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5c2v\" (UniqueName: \"kubernetes.io/projected/175b7d23-6c78-4a15-9f04-f40b83d3a932-kube-api-access-s5c2v\") pod \"heat-db-sync-slxfj\" (UID: \"175b7d23-6c78-4a15-9f04-f40b83d3a932\") " pod="openstack/heat-db-sync-slxfj" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.573840 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/397f040a-a433-4049-8b8e-fcd35c003b15-config\") pod \"neutron-db-sync-cljgw\" (UID: \"397f040a-a433-4049-8b8e-fcd35c003b15\") " pod="openstack/neutron-db-sync-cljgw" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.573864 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/175b7d23-6c78-4a15-9f04-f40b83d3a932-config-data\") pod \"heat-db-sync-slxfj\" (UID: \"175b7d23-6c78-4a15-9f04-f40b83d3a932\") " pod="openstack/heat-db-sync-slxfj" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.573897 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5aeda9c4-9011-47b9-8083-f0309ed8a010-etc-machine-id\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.579740 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-fkf86" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.585045 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175b7d23-6c78-4a15-9f04-f40b83d3a932-combined-ca-bundle\") pod \"heat-db-sync-slxfj\" (UID: \"175b7d23-6c78-4a15-9f04-f40b83d3a932\") " pod="openstack/heat-db-sync-slxfj" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.591392 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/397f040a-a433-4049-8b8e-fcd35c003b15-combined-ca-bundle\") pod \"neutron-db-sync-cljgw\" (UID: \"397f040a-a433-4049-8b8e-fcd35c003b15\") " pod="openstack/neutron-db-sync-cljgw" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.592449 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/397f040a-a433-4049-8b8e-fcd35c003b15-config\") pod \"neutron-db-sync-cljgw\" (UID: \"397f040a-a433-4049-8b8e-fcd35c003b15\") " pod="openstack/neutron-db-sync-cljgw" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.592624 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-bm8kl" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.595800 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.604920 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.630568 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/175b7d23-6c78-4a15-9f04-f40b83d3a932-config-data\") pod \"heat-db-sync-slxfj\" (UID: \"175b7d23-6c78-4a15-9f04-f40b83d3a932\") " pod="openstack/heat-db-sync-slxfj" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.639037 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-fkf86"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.643514 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5c2v\" (UniqueName: \"kubernetes.io/projected/175b7d23-6c78-4a15-9f04-f40b83d3a932-kube-api-access-s5c2v\") pod \"heat-db-sync-slxfj\" (UID: \"175b7d23-6c78-4a15-9f04-f40b83d3a932\") " pod="openstack/heat-db-sync-slxfj" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.660542 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrwkm\" (UniqueName: \"kubernetes.io/projected/397f040a-a433-4049-8b8e-fcd35c003b15-kube-api-access-rrwkm\") pod \"neutron-db-sync-cljgw\" (UID: \"397f040a-a433-4049-8b8e-fcd35c003b15\") " pod="openstack/neutron-db-sync-cljgw" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.662250 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-slxfj" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.665796 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-9kghs"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.675158 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da3a231b-de19-4217-8b1b-54d40e56f0c3-db-sync-config-data\") pod \"barbican-db-sync-fkf86\" (UID: \"da3a231b-de19-4217-8b1b-54d40e56f0c3\") " pod="openstack/barbican-db-sync-fkf86" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.675207 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da3a231b-de19-4217-8b1b-54d40e56f0c3-combined-ca-bundle\") pod \"barbican-db-sync-fkf86\" (UID: \"da3a231b-de19-4217-8b1b-54d40e56f0c3\") " pod="openstack/barbican-db-sync-fkf86" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.675234 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flm5p\" (UniqueName: \"kubernetes.io/projected/5aeda9c4-9011-47b9-8083-f0309ed8a010-kube-api-access-flm5p\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.675267 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-combined-ca-bundle\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.675322 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5aeda9c4-9011-47b9-8083-f0309ed8a010-etc-machine-id\") pod \"cinder-db-sync-brxjz\" (UID: 
\"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.675344 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj6l5\" (UniqueName: \"kubernetes.io/projected/da3a231b-de19-4217-8b1b-54d40e56f0c3-kube-api-access-tj6l5\") pod \"barbican-db-sync-fkf86\" (UID: \"da3a231b-de19-4217-8b1b-54d40e56f0c3\") " pod="openstack/barbican-db-sync-fkf86" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.675369 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-scripts\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.675409 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-db-sync-config-data\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.675506 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-config-data\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.677715 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5aeda9c4-9011-47b9-8083-f0309ed8a010-etc-machine-id\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.680357 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-combined-ca-bundle\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.689581 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-scripts\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.690783 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-db-sync-config-data\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.690910 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-w7clk"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.697519 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-config-data\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " 
pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.701738 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.703520 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flm5p\" (UniqueName: \"kubernetes.io/projected/5aeda9c4-9011-47b9-8083-f0309ed8a010-kube-api-access-flm5p\") pod \"cinder-db-sync-brxjz\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.718828 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-brxjz" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.720516 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.720867 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.720625 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-w7clk"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.720913 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-76ssl" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.778392 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da3a231b-de19-4217-8b1b-54d40e56f0c3-db-sync-config-data\") pod \"barbican-db-sync-fkf86\" (UID: \"da3a231b-de19-4217-8b1b-54d40e56f0c3\") " pod="openstack/barbican-db-sync-fkf86" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.778713 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-scripts\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.778753 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da3a231b-de19-4217-8b1b-54d40e56f0c3-combined-ca-bundle\") pod \"barbican-db-sync-fkf86\" (UID: \"da3a231b-de19-4217-8b1b-54d40e56f0c3\") " pod="openstack/barbican-db-sync-fkf86" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.778846 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj6l5\" (UniqueName: \"kubernetes.io/projected/da3a231b-de19-4217-8b1b-54d40e56f0c3-kube-api-access-tj6l5\") pod \"barbican-db-sync-fkf86\" (UID: \"da3a231b-de19-4217-8b1b-54d40e56f0c3\") " pod="openstack/barbican-db-sync-fkf86" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.778903 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-config-data\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.778923 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-combined-ca-bundle\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.778976 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxw66\" (UniqueName: \"kubernetes.io/projected/0b7ea171-752a-4d11-b544-d5c69f602dd0-kube-api-access-xxw66\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.779026 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b7ea171-752a-4d11-b544-d5c69f602dd0-logs\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.784352 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-dg99w"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.785684 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da3a231b-de19-4217-8b1b-54d40e56f0c3-db-sync-config-data\") pod \"barbican-db-sync-fkf86\" (UID: \"da3a231b-de19-4217-8b1b-54d40e56f0c3\") " pod="openstack/barbican-db-sync-fkf86" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.786789 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da3a231b-de19-4217-8b1b-54d40e56f0c3-combined-ca-bundle\") pod \"barbican-db-sync-fkf86\" (UID: \"da3a231b-de19-4217-8b1b-54d40e56f0c3\") " pod="openstack/barbican-db-sync-fkf86" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.788956 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.812618 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj6l5\" (UniqueName: \"kubernetes.io/projected/da3a231b-de19-4217-8b1b-54d40e56f0c3-kube-api-access-tj6l5\") pod \"barbican-db-sync-fkf86\" (UID: \"da3a231b-de19-4217-8b1b-54d40e56f0c3\") " pod="openstack/barbican-db-sync-fkf86" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.820587 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cljgw" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.843669 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.845899 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.848385 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.849214 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.861130 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.874248 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-dg99w"] Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.882006 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-scripts\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.882097 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wncl4\" (UniqueName: \"kubernetes.io/projected/d0acf48a-d696-430d-8e7e-e92de923c9be-kube-api-access-wncl4\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.882187 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.882228 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.882275 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-config\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.882300 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-config-data\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.882341 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-combined-ca-bundle\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.882363 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-dns-svc\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.882604 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxw66\" (UniqueName: \"kubernetes.io/projected/0b7ea171-752a-4d11-b544-d5c69f602dd0-kube-api-access-xxw66\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.882693 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b7ea171-752a-4d11-b544-d5c69f602dd0-logs\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.882756 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.883637 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b7ea171-752a-4d11-b544-d5c69f602dd0-logs\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.916056 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-config-data\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.924218 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxw66\" (UniqueName: \"kubernetes.io/projected/0b7ea171-752a-4d11-b544-d5c69f602dd0-kube-api-access-xxw66\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.931070 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-combined-ca-bundle\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.932607 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-scripts\") pod \"placement-db-sync-w7clk\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988503 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-ovsdbserver-sb\") pod 
\"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988579 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/341036d7-5d9f-493a-b043-11d6517c390d-run-httpd\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988610 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wncl4\" (UniqueName: \"kubernetes.io/projected/d0acf48a-d696-430d-8e7e-e92de923c9be-kube-api-access-wncl4\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988635 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-scripts\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988669 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988686 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-config-data\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988703 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnvqb\" (UniqueName: \"kubernetes.io/projected/341036d7-5d9f-493a-b043-11d6517c390d-kube-api-access-tnvqb\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988727 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988762 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988789 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-config\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 
00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988813 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-dns-svc\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988840 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/341036d7-5d9f-493a-b043-11d6517c390d-log-httpd\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.988859 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.991776 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.992729 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-config\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:58 crc kubenswrapper[4884]: I1210 00:54:58.993293 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-dns-svc\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:58.999700 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.005204 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.028132 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-fkf86" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.029778 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wncl4\" (UniqueName: \"kubernetes.io/projected/d0acf48a-d696-430d-8e7e-e92de923c9be-kube-api-access-wncl4\") pod \"dnsmasq-dns-cf78879c9-dg99w\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.045808 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-w7clk" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.090273 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/341036d7-5d9f-493a-b043-11d6517c390d-log-httpd\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.090323 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.090423 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/341036d7-5d9f-493a-b043-11d6517c390d-run-httpd\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.090486 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-scripts\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.090526 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-config-data\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.090550 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnvqb\" (UniqueName: \"kubernetes.io/projected/341036d7-5d9f-493a-b043-11d6517c390d-kube-api-access-tnvqb\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.090583 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.092120 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/341036d7-5d9f-493a-b043-11d6517c390d-run-httpd\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.092412 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/341036d7-5d9f-493a-b043-11d6517c390d-log-httpd\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.104992 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-scripts\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.105162 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-config-data\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.105346 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.105726 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.127109 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnvqb\" (UniqueName: \"kubernetes.io/projected/341036d7-5d9f-493a-b043-11d6517c390d-kube-api-access-tnvqb\") pod \"ceilometer-0\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.149064 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.194771 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.260233 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7wr27"] Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.548211 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-9kghs"] Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.589211 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-slxfj"] Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.605865 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-brxjz"] Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.711264 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-cljgw"] Dec 10 00:54:59 crc kubenswrapper[4884]: I1210 00:54:59.934085 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" podUID="e26b622d-9a7a-4af6-9294-bde8eeaa5584" containerName="dnsmasq-dns" containerID="cri-o://3c059e51d118df25118afc9455ae380a51d4874970f5e513b59d6eb1d77bf03d" gracePeriod=10 Dec 10 00:55:00 crc kubenswrapper[4884]: W1210 00:55:00.079608 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d67fbde_7e03_4289_9ccf_c642a2e6b4b7.slice/crio-ff0e783fbebf80bfa322399683e8a6c7cd810b936248979550df03a9a9fdc2da WatchSource:0}: Error finding container ff0e783fbebf80bfa322399683e8a6c7cd810b936248979550df03a9a9fdc2da: Status 404 returned error can't find the container with id ff0e783fbebf80bfa322399683e8a6c7cd810b936248979550df03a9a9fdc2da Dec 10 00:55:00 crc kubenswrapper[4884]: W1210 00:55:00.084056 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod175b7d23_6c78_4a15_9f04_f40b83d3a932.slice/crio-2176c0accb637087952791031d5af0e33458a363bce251b353d48b0b5e8483ed WatchSource:0}: Error finding container 2176c0accb637087952791031d5af0e33458a363bce251b353d48b0b5e8483ed: Status 404 returned error can't find the container with id 2176c0accb637087952791031d5af0e33458a363bce251b353d48b0b5e8483ed Dec 10 00:55:00 crc kubenswrapper[4884]: W1210 00:55:00.087919 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5aeda9c4_9011_47b9_8083_f0309ed8a010.slice/crio-3df0b88fa5fdff682bdb52ae73e8a3c6babca5f1256df23649f4e3fea2782660 WatchSource:0}: Error finding container 3df0b88fa5fdff682bdb52ae73e8a3c6babca5f1256df23649f4e3fea2782660: Status 404 returned error can't find the container with id 3df0b88fa5fdff682bdb52ae73e8a3c6babca5f1256df23649f4e3fea2782660 Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.767125 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-dg99w"] Dec 10 00:55:00 crc kubenswrapper[4884]: E1210 00:55:00.867803 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d67fbde_7e03_4289_9ccf_c642a2e6b4b7.slice/crio-7cd755246b73e942861a3888cde0f6f73d5a81746db73d9692553ba2bb315c8f.scope\": RecentStats: unable to find data in memory cache]" Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.948303 4884 generic.go:334] "Generic (PLEG): container finished" 
podID="8d67fbde-7e03-4289-9ccf-c642a2e6b4b7" containerID="7cd755246b73e942861a3888cde0f6f73d5a81746db73d9692553ba2bb315c8f" exitCode=0 Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.948586 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-9kghs" event={"ID":"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7","Type":"ContainerDied","Data":"7cd755246b73e942861a3888cde0f6f73d5a81746db73d9692553ba2bb315c8f"} Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.948613 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-9kghs" event={"ID":"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7","Type":"ContainerStarted","Data":"ff0e783fbebf80bfa322399683e8a6c7cd810b936248979550df03a9a9fdc2da"} Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.954332 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-fkf86"] Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.959663 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-brxjz" event={"ID":"5aeda9c4-9011-47b9-8083-f0309ed8a010","Type":"ContainerStarted","Data":"3df0b88fa5fdff682bdb52ae73e8a3c6babca5f1256df23649f4e3fea2782660"} Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.963964 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-dg99w" event={"ID":"d0acf48a-d696-430d-8e7e-e92de923c9be","Type":"ContainerStarted","Data":"fe01c055399e0efd2e8d351e7cef85dda4cfa2c06a6d31dbbfcbac58bb90cc8b"} Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.965868 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7wr27" event={"ID":"c0c53614-a7ff-45aa-bb78-b1b76d53201b","Type":"ContainerStarted","Data":"637198d9f6a098982ca7f5166f8dbe4b0d6a2c4fbdebbfce96ebaa7f1fb17b94"} Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.965892 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7wr27" event={"ID":"c0c53614-a7ff-45aa-bb78-b1b76d53201b","Type":"ContainerStarted","Data":"c120e442bc97c2b61a43c65423cd733d86cfcf2078ca4eb55099563cdeb5a4a3"} Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.979721 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:55:00 crc kubenswrapper[4884]: W1210 00:55:00.986153 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda3a231b_de19_4217_8b1b_54d40e56f0c3.slice/crio-e8acecb5c63745f293e125a8d27d7dca2abab88f80c664b0999d87a711aa4e99 WatchSource:0}: Error finding container e8acecb5c63745f293e125a8d27d7dca2abab88f80c664b0999d87a711aa4e99: Status 404 returned error can't find the container with id e8acecb5c63745f293e125a8d27d7dca2abab88f80c664b0999d87a711aa4e99 Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.988803 4884 generic.go:334] "Generic (PLEG): container finished" podID="e26b622d-9a7a-4af6-9294-bde8eeaa5584" containerID="3c059e51d118df25118afc9455ae380a51d4874970f5e513b59d6eb1d77bf03d" exitCode=0 Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.988862 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" event={"ID":"e26b622d-9a7a-4af6-9294-bde8eeaa5584","Type":"ContainerDied","Data":"3c059e51d118df25118afc9455ae380a51d4874970f5e513b59d6eb1d77bf03d"} Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.990244 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-db-sync-cljgw" event={"ID":"397f040a-a433-4049-8b8e-fcd35c003b15","Type":"ContainerStarted","Data":"506fc9ae052bbb238d93afe313442339b94b0d4e58ec3e81358996a6eb179396"} Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.990273 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cljgw" event={"ID":"397f040a-a433-4049-8b8e-fcd35c003b15","Type":"ContainerStarted","Data":"108764f14902c0d85f041fd04a0de302cd5db71a4704f3c784439fd5e74f08b4"} Dec 10 00:55:00 crc kubenswrapper[4884]: I1210 00:55:00.993040 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-slxfj" event={"ID":"175b7d23-6c78-4a15-9f04-f40b83d3a932","Type":"ContainerStarted","Data":"2176c0accb637087952791031d5af0e33458a363bce251b353d48b0b5e8483ed"} Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.008159 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-7wr27" podStartSLOduration=3.008133416 podStartE2EDuration="3.008133416s" podCreationTimestamp="2025-12-10 00:54:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:00.99129634 +0000 UTC m=+1474.069253457" watchObservedRunningTime="2025-12-10 00:55:01.008133416 +0000 UTC m=+1474.086090533" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.032913 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-cljgw" podStartSLOduration=3.032891087 podStartE2EDuration="3.032891087s" podCreationTimestamp="2025-12-10 00:54:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:01.016987416 +0000 UTC m=+1474.094944533" watchObservedRunningTime="2025-12-10 00:55:01.032891087 +0000 UTC m=+1474.110848204" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.080620 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.139867 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-config\") pod \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.139906 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-ovsdbserver-nb\") pod \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.140071 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jf9j7\" (UniqueName: \"kubernetes.io/projected/e26b622d-9a7a-4af6-9294-bde8eeaa5584-kube-api-access-jf9j7\") pod \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.140191 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-dns-svc\") pod \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.140241 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-dns-swift-storage-0\") pod \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.140269 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-ovsdbserver-sb\") pod \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\" (UID: \"e26b622d-9a7a-4af6-9294-bde8eeaa5584\") " Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.153780 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-w7clk"] Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.154715 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e26b622d-9a7a-4af6-9294-bde8eeaa5584-kube-api-access-jf9j7" (OuterVolumeSpecName: "kube-api-access-jf9j7") pod "e26b622d-9a7a-4af6-9294-bde8eeaa5584" (UID: "e26b622d-9a7a-4af6-9294-bde8eeaa5584"). InnerVolumeSpecName "kube-api-access-jf9j7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.171408 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:55:01 crc kubenswrapper[4884]: W1210 00:55:01.242970 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod341036d7_5d9f_493a_b043_11d6517c390d.slice/crio-c0783c4de94b5a22879e3effd09a5bb4244bd392c93af4b757cd87bdf34a5e19 WatchSource:0}: Error finding container c0783c4de94b5a22879e3effd09a5bb4244bd392c93af4b757cd87bdf34a5e19: Status 404 returned error can't find the container with id c0783c4de94b5a22879e3effd09a5bb4244bd392c93af4b757cd87bdf34a5e19 Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.243265 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jf9j7\" (UniqueName: \"kubernetes.io/projected/e26b622d-9a7a-4af6-9294-bde8eeaa5584-kube-api-access-jf9j7\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.280617 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.325660 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e26b622d-9a7a-4af6-9294-bde8eeaa5584" (UID: "e26b622d-9a7a-4af6-9294-bde8eeaa5584"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.344793 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-dns-svc\") pod \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.344865 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-config\") pod \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.345015 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-ovsdbserver-nb\") pod \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.345078 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-dns-swift-storage-0\") pod \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.345144 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-ovsdbserver-sb\") pod \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.345181 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"kube-api-access-gknrx\" (UniqueName: \"kubernetes.io/projected/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-kube-api-access-gknrx\") pod \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\" (UID: \"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7\") " Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.346613 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.348881 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e26b622d-9a7a-4af6-9294-bde8eeaa5584" (UID: "e26b622d-9a7a-4af6-9294-bde8eeaa5584"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.352808 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-config" (OuterVolumeSpecName: "config") pod "e26b622d-9a7a-4af6-9294-bde8eeaa5584" (UID: "e26b622d-9a7a-4af6-9294-bde8eeaa5584"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.354818 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-kube-api-access-gknrx" (OuterVolumeSpecName: "kube-api-access-gknrx") pod "8d67fbde-7e03-4289-9ccf-c642a2e6b4b7" (UID: "8d67fbde-7e03-4289-9ccf-c642a2e6b4b7"). InnerVolumeSpecName "kube-api-access-gknrx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.365292 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e26b622d-9a7a-4af6-9294-bde8eeaa5584" (UID: "e26b622d-9a7a-4af6-9294-bde8eeaa5584"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.378152 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e26b622d-9a7a-4af6-9294-bde8eeaa5584" (UID: "e26b622d-9a7a-4af6-9294-bde8eeaa5584"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.388852 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8d67fbde-7e03-4289-9ccf-c642a2e6b4b7" (UID: "8d67fbde-7e03-4289-9ccf-c642a2e6b4b7"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.390012 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8d67fbde-7e03-4289-9ccf-c642a2e6b4b7" (UID: "8d67fbde-7e03-4289-9ccf-c642a2e6b4b7"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.398074 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8d67fbde-7e03-4289-9ccf-c642a2e6b4b7" (UID: "8d67fbde-7e03-4289-9ccf-c642a2e6b4b7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.413643 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-config" (OuterVolumeSpecName: "config") pod "8d67fbde-7e03-4289-9ccf-c642a2e6b4b7" (UID: "8d67fbde-7e03-4289-9ccf-c642a2e6b4b7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.430649 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8d67fbde-7e03-4289-9ccf-c642a2e6b4b7" (UID: "8d67fbde-7e03-4289-9ccf-c642a2e6b4b7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.447928 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.447961 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.447975 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.447984 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.447992 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.448001 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gknrx\" (UniqueName: \"kubernetes.io/projected/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-kube-api-access-gknrx\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.448010 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.448019 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e26b622d-9a7a-4af6-9294-bde8eeaa5584-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 
00:55:01.448028 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:01 crc kubenswrapper[4884]: I1210 00:55:01.448036 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.014442 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" event={"ID":"e26b622d-9a7a-4af6-9294-bde8eeaa5584","Type":"ContainerDied","Data":"ae72b0fc7d162c57a0e0c537eb478d9c21f1f990fc4053a1a3eed806999977fe"} Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.014526 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-8zdxj" Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.014581 4884 scope.go:117] "RemoveContainer" containerID="3c059e51d118df25118afc9455ae380a51d4874970f5e513b59d6eb1d77bf03d" Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.020994 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-9kghs" event={"ID":"8d67fbde-7e03-4289-9ccf-c642a2e6b4b7","Type":"ContainerDied","Data":"ff0e783fbebf80bfa322399683e8a6c7cd810b936248979550df03a9a9fdc2da"} Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.021088 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-9kghs" Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.043783 4884 generic.go:334] "Generic (PLEG): container finished" podID="d0acf48a-d696-430d-8e7e-e92de923c9be" containerID="dc8e5b214528dda8fbcdbd2860657c9f1545b2d81a0ce8aa1257ca78bd491600" exitCode=0 Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.043954 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-dg99w" event={"ID":"d0acf48a-d696-430d-8e7e-e92de923c9be","Type":"ContainerDied","Data":"dc8e5b214528dda8fbcdbd2860657c9f1545b2d81a0ce8aa1257ca78bd491600"} Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.045635 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-fkf86" event={"ID":"da3a231b-de19-4217-8b1b-54d40e56f0c3","Type":"ContainerStarted","Data":"e8acecb5c63745f293e125a8d27d7dca2abab88f80c664b0999d87a711aa4e99"} Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.047383 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-w7clk" event={"ID":"0b7ea171-752a-4d11-b544-d5c69f602dd0","Type":"ContainerStarted","Data":"0363f38f6603f1729b31e59ac2ac6a42f4ca2a34bf4c8965008b966c5ff0fb27"} Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.050591 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"341036d7-5d9f-493a-b043-11d6517c390d","Type":"ContainerStarted","Data":"c0783c4de94b5a22879e3effd09a5bb4244bd392c93af4b757cd87bdf34a5e19"} Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.095536 4884 scope.go:117] "RemoveContainer" containerID="d44c2dd9e78fe55d8ac6a3fe2109feb54a6fff207a8af188217309a65f5d4b20" Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.134899 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-9kghs"] Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.143409 4884 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-9kghs"] Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.165813 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-8zdxj"] Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.173927 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-8zdxj"] Dec 10 00:55:02 crc kubenswrapper[4884]: I1210 00:55:02.225968 4884 scope.go:117] "RemoveContainer" containerID="7cd755246b73e942861a3888cde0f6f73d5a81746db73d9692553ba2bb315c8f" Dec 10 00:55:03 crc kubenswrapper[4884]: I1210 00:55:03.080242 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-dg99w" event={"ID":"d0acf48a-d696-430d-8e7e-e92de923c9be","Type":"ContainerStarted","Data":"2999aa7691d4898c99efc6e61802a4174aad4843c559e9ba740cb505b7752a2d"} Dec 10 00:55:03 crc kubenswrapper[4884]: I1210 00:55:03.080478 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:55:03 crc kubenswrapper[4884]: I1210 00:55:03.102286 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bd4dd" event={"ID":"db50dd06-d67c-468e-88de-6a8fb86bd1bd","Type":"ContainerStarted","Data":"7fa3908321a1b04531697e7e1891c7c5ca5d267f890a92f0be0cdb5c708cd201"} Dec 10 00:55:03 crc kubenswrapper[4884]: I1210 00:55:03.114716 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cf78879c9-dg99w" podStartSLOduration=5.114697564 podStartE2EDuration="5.114697564s" podCreationTimestamp="2025-12-10 00:54:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:03.11233609 +0000 UTC m=+1476.190293217" watchObservedRunningTime="2025-12-10 00:55:03.114697564 +0000 UTC m=+1476.192654681" Dec 10 00:55:03 crc kubenswrapper[4884]: I1210 00:55:03.174742 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-bd4dd" podStartSLOduration=2.522239422 podStartE2EDuration="38.17472079s" podCreationTimestamp="2025-12-10 00:54:25 +0000 UTC" firstStartedPulling="2025-12-10 00:54:26.581044834 +0000 UTC m=+1439.659001951" lastFinishedPulling="2025-12-10 00:55:02.233526202 +0000 UTC m=+1475.311483319" observedRunningTime="2025-12-10 00:55:03.160190647 +0000 UTC m=+1476.238147774" watchObservedRunningTime="2025-12-10 00:55:03.17472079 +0000 UTC m=+1476.252677907" Dec 10 00:55:03 crc kubenswrapper[4884]: I1210 00:55:03.305424 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d67fbde-7e03-4289-9ccf-c642a2e6b4b7" path="/var/lib/kubelet/pods/8d67fbde-7e03-4289-9ccf-c642a2e6b4b7/volumes" Dec 10 00:55:03 crc kubenswrapper[4884]: I1210 00:55:03.305960 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e26b622d-9a7a-4af6-9294-bde8eeaa5584" path="/var/lib/kubelet/pods/e26b622d-9a7a-4af6-9294-bde8eeaa5584/volumes" Dec 10 00:55:05 crc kubenswrapper[4884]: I1210 00:55:05.124361 4884 generic.go:334] "Generic (PLEG): container finished" podID="c0c53614-a7ff-45aa-bb78-b1b76d53201b" containerID="637198d9f6a098982ca7f5166f8dbe4b0d6a2c4fbdebbfce96ebaa7f1fb17b94" exitCode=0 Dec 10 00:55:05 crc kubenswrapper[4884]: I1210 00:55:05.124571 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7wr27" 
event={"ID":"c0c53614-a7ff-45aa-bb78-b1b76d53201b","Type":"ContainerDied","Data":"637198d9f6a098982ca7f5166f8dbe4b0d6a2c4fbdebbfce96ebaa7f1fb17b94"} Dec 10 00:55:09 crc kubenswrapper[4884]: I1210 00:55:09.160623 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:55:09 crc kubenswrapper[4884]: I1210 00:55:09.239016 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-drtcs"] Dec 10 00:55:09 crc kubenswrapper[4884]: I1210 00:55:09.239280 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" podUID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" containerName="dnsmasq-dns" containerID="cri-o://2913a7ae2ba1c8a32103b08466fd0c5b5f862ef57ed46214644ecbc07a71aa9a" gracePeriod=10 Dec 10 00:55:10 crc kubenswrapper[4884]: I1210 00:55:10.175920 4884 generic.go:334] "Generic (PLEG): container finished" podID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" containerID="2913a7ae2ba1c8a32103b08466fd0c5b5f862ef57ed46214644ecbc07a71aa9a" exitCode=0 Dec 10 00:55:10 crc kubenswrapper[4884]: I1210 00:55:10.175963 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" event={"ID":"20a98971-47d1-49fe-b09b-84acaa9b4c6f","Type":"ContainerDied","Data":"2913a7ae2ba1c8a32103b08466fd0c5b5f862ef57ed46214644ecbc07a71aa9a"} Dec 10 00:55:12 crc kubenswrapper[4884]: I1210 00:55:12.296379 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" podUID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.136:5353: connect: connection refused" Dec 10 00:55:16 crc kubenswrapper[4884]: E1210 00:55:16.219130 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Dec 10 00:55:16 crc kubenswrapper[4884]: E1210 00:55:16.220081 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xxw66,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-w7clk_openstack(0b7ea171-752a-4d11-b544-d5c69f602dd0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 00:55:16 crc kubenswrapper[4884]: E1210 00:55:16.221512 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-w7clk" podUID="0b7ea171-752a-4d11-b544-d5c69f602dd0" Dec 10 00:55:16 crc kubenswrapper[4884]: E1210 00:55:16.267553 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-w7clk" podUID="0b7ea171-752a-4d11-b544-d5c69f602dd0" Dec 10 00:55:17 crc kubenswrapper[4884]: I1210 00:55:17.282569 4884 generic.go:334] "Generic (PLEG): container finished" podID="db50dd06-d67c-468e-88de-6a8fb86bd1bd" containerID="7fa3908321a1b04531697e7e1891c7c5ca5d267f890a92f0be0cdb5c708cd201" exitCode=0 Dec 10 00:55:17 crc kubenswrapper[4884]: I1210 00:55:17.282711 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bd4dd" event={"ID":"db50dd06-d67c-468e-88de-6a8fb86bd1bd","Type":"ContainerDied","Data":"7fa3908321a1b04531697e7e1891c7c5ca5d267f890a92f0be0cdb5c708cd201"} Dec 10 00:55:17 crc kubenswrapper[4884]: I1210 00:55:17.300552 4884 prober.go:107] "Probe 
failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" podUID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.136:5353: connect: connection refused" Dec 10 00:55:17 crc kubenswrapper[4884]: I1210 00:55:17.939893 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.097877 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.097929 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.119814 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-scripts\") pod \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.119875 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-fernet-keys\") pod \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.119926 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-credential-keys\") pod \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.119966 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-config-data\") pod \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.120036 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-combined-ca-bundle\") pod \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.120103 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsws7\" (UniqueName: \"kubernetes.io/projected/c0c53614-a7ff-45aa-bb78-b1b76d53201b-kube-api-access-vsws7\") pod \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\" (UID: \"c0c53614-a7ff-45aa-bb78-b1b76d53201b\") " Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.126526 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-scripts" (OuterVolumeSpecName: "scripts") pod 
"c0c53614-a7ff-45aa-bb78-b1b76d53201b" (UID: "c0c53614-a7ff-45aa-bb78-b1b76d53201b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.127169 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "c0c53614-a7ff-45aa-bb78-b1b76d53201b" (UID: "c0c53614-a7ff-45aa-bb78-b1b76d53201b"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.127230 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0c53614-a7ff-45aa-bb78-b1b76d53201b-kube-api-access-vsws7" (OuterVolumeSpecName: "kube-api-access-vsws7") pod "c0c53614-a7ff-45aa-bb78-b1b76d53201b" (UID: "c0c53614-a7ff-45aa-bb78-b1b76d53201b"). InnerVolumeSpecName "kube-api-access-vsws7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.132634 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "c0c53614-a7ff-45aa-bb78-b1b76d53201b" (UID: "c0c53614-a7ff-45aa-bb78-b1b76d53201b"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.166842 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-config-data" (OuterVolumeSpecName: "config-data") pod "c0c53614-a7ff-45aa-bb78-b1b76d53201b" (UID: "c0c53614-a7ff-45aa-bb78-b1b76d53201b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.174984 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0c53614-a7ff-45aa-bb78-b1b76d53201b" (UID: "c0c53614-a7ff-45aa-bb78-b1b76d53201b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.225528 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsws7\" (UniqueName: \"kubernetes.io/projected/c0c53614-a7ff-45aa-bb78-b1b76d53201b-kube-api-access-vsws7\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.225801 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.225939 4884 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.226002 4884 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.226111 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.226170 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c53614-a7ff-45aa-bb78-b1b76d53201b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.293930 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7wr27" event={"ID":"c0c53614-a7ff-45aa-bb78-b1b76d53201b","Type":"ContainerDied","Data":"c120e442bc97c2b61a43c65423cd733d86cfcf2078ca4eb55099563cdeb5a4a3"} Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.293992 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c120e442bc97c2b61a43c65423cd733d86cfcf2078ca4eb55099563cdeb5a4a3" Dec 10 00:55:18 crc kubenswrapper[4884]: I1210 00:55:18.293958 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-7wr27" Dec 10 00:55:18 crc kubenswrapper[4884]: E1210 00:55:18.324093 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Dec 10 00:55:18 crc kubenswrapper[4884]: E1210 00:55:18.324233 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n545h5bfh8fh65bh55fh645h559h5fbh555h5b7h5ch6bh6ch64hbbh67h54dhc6h57ch55ch545h64fh5fch5b8h669hffh649h75hbch56fh5cchb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tnvqb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(341036d7-5d9f-493a-b043-11d6517c390d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.124106 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-7wr27"] Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.137828 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-7wr27"] Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.226100 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-hr4wb"] Dec 10 00:55:19 crc kubenswrapper[4884]: E1210 00:55:19.226604 4884 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e26b622d-9a7a-4af6-9294-bde8eeaa5584" containerName="dnsmasq-dns" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.226629 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e26b622d-9a7a-4af6-9294-bde8eeaa5584" containerName="dnsmasq-dns" Dec 10 00:55:19 crc kubenswrapper[4884]: E1210 00:55:19.226676 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0c53614-a7ff-45aa-bb78-b1b76d53201b" containerName="keystone-bootstrap" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.226687 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0c53614-a7ff-45aa-bb78-b1b76d53201b" containerName="keystone-bootstrap" Dec 10 00:55:19 crc kubenswrapper[4884]: E1210 00:55:19.226697 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e26b622d-9a7a-4af6-9294-bde8eeaa5584" containerName="init" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.226706 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e26b622d-9a7a-4af6-9294-bde8eeaa5584" containerName="init" Dec 10 00:55:19 crc kubenswrapper[4884]: E1210 00:55:19.226723 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d67fbde-7e03-4289-9ccf-c642a2e6b4b7" containerName="init" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.226731 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d67fbde-7e03-4289-9ccf-c642a2e6b4b7" containerName="init" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.226956 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e26b622d-9a7a-4af6-9294-bde8eeaa5584" containerName="dnsmasq-dns" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.226990 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d67fbde-7e03-4289-9ccf-c642a2e6b4b7" containerName="init" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.227006 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0c53614-a7ff-45aa-bb78-b1b76d53201b" containerName="keystone-bootstrap" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.227785 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.230558 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.230617 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.230648 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8z4pv" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.230575 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.230899 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.236636 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hr4wb"] Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.324972 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0c53614-a7ff-45aa-bb78-b1b76d53201b" path="/var/lib/kubelet/pods/c0c53614-a7ff-45aa-bb78-b1b76d53201b/volumes" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.345169 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-fernet-keys\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.345228 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-combined-ca-bundle\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.345252 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-config-data\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.345334 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jh4kr\" (UniqueName: \"kubernetes.io/projected/6943c1e6-d468-45b6-a2f7-831a6961e64a-kube-api-access-jh4kr\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.345357 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-scripts\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.345543 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-credential-keys\") pod 
\"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.447048 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-fernet-keys\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.447155 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-combined-ca-bundle\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.447183 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-config-data\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.447241 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jh4kr\" (UniqueName: \"kubernetes.io/projected/6943c1e6-d468-45b6-a2f7-831a6961e64a-kube-api-access-jh4kr\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.447269 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-scripts\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.447310 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-credential-keys\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.453503 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-combined-ca-bundle\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.454720 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-fernet-keys\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.454817 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-config-data\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 
00:55:19.465591 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-scripts\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.466291 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-credential-keys\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.469806 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jh4kr\" (UniqueName: \"kubernetes.io/projected/6943c1e6-d468-45b6-a2f7-831a6961e64a-kube-api-access-jh4kr\") pod \"keystone-bootstrap-hr4wb\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:19 crc kubenswrapper[4884]: I1210 00:55:19.551832 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:20 crc kubenswrapper[4884]: I1210 00:55:20.332120 4884 generic.go:334] "Generic (PLEG): container finished" podID="397f040a-a433-4049-8b8e-fcd35c003b15" containerID="506fc9ae052bbb238d93afe313442339b94b0d4e58ec3e81358996a6eb179396" exitCode=0 Dec 10 00:55:20 crc kubenswrapper[4884]: I1210 00:55:20.332158 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cljgw" event={"ID":"397f040a-a433-4049-8b8e-fcd35c003b15","Type":"ContainerDied","Data":"506fc9ae052bbb238d93afe313442339b94b0d4e58ec3e81358996a6eb179396"} Dec 10 00:55:25 crc kubenswrapper[4884]: E1210 00:55:25.819447 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified" Dec 10 00:55:25 crc kubenswrapper[4884]: E1210 00:55:25.820113 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d 
db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s5c2v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-slxfj_openstack(175b7d23-6c78-4a15-9f04-f40b83d3a932): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 00:55:25 crc kubenswrapper[4884]: E1210 00:55:25.821305 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-slxfj" podUID="175b7d23-6c78-4a15-9f04-f40b83d3a932" Dec 10 00:55:26 crc kubenswrapper[4884]: E1210 00:55:26.188464 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Dec 10 00:55:26 crc kubenswrapper[4884]: E1210 00:55:26.188909 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tj6l5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-fkf86_openstack(da3a231b-de19-4217-8b1b-54d40e56f0c3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 00:55:26 crc kubenswrapper[4884]: E1210 00:55:26.190410 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-fkf86" podUID="da3a231b-de19-4217-8b1b-54d40e56f0c3" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.297129 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-cljgw" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.304025 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-bd4dd" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.435304 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-bd4dd" event={"ID":"db50dd06-d67c-468e-88de-6a8fb86bd1bd","Type":"ContainerDied","Data":"ebc911aa90c4189eab34474f53da7da8538de5caf4a8a92f165e8c305d2b3fe0"} Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.435373 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ebc911aa90c4189eab34474f53da7da8538de5caf4a8a92f165e8c305d2b3fe0" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.435483 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-bd4dd" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.471769 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-cljgw" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.471976 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-cljgw" event={"ID":"397f040a-a433-4049-8b8e-fcd35c003b15","Type":"ContainerDied","Data":"108764f14902c0d85f041fd04a0de302cd5db71a4704f3c784439fd5e74f08b4"} Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.472005 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="108764f14902c0d85f041fd04a0de302cd5db71a4704f3c784439fd5e74f08b4" Dec 10 00:55:26 crc kubenswrapper[4884]: E1210 00:55:26.473186 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified\\\"\"" pod="openstack/heat-db-sync-slxfj" podUID="175b7d23-6c78-4a15-9f04-f40b83d3a932" Dec 10 00:55:26 crc kubenswrapper[4884]: E1210 00:55:26.473360 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-fkf86" podUID="da3a231b-de19-4217-8b1b-54d40e56f0c3" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.492176 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrwkm\" (UniqueName: \"kubernetes.io/projected/397f040a-a433-4049-8b8e-fcd35c003b15-kube-api-access-rrwkm\") pod \"397f040a-a433-4049-8b8e-fcd35c003b15\" (UID: \"397f040a-a433-4049-8b8e-fcd35c003b15\") " Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.492312 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-combined-ca-bundle\") pod \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.492347 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/397f040a-a433-4049-8b8e-fcd35c003b15-config\") pod \"397f040a-a433-4049-8b8e-fcd35c003b15\" (UID: \"397f040a-a433-4049-8b8e-fcd35c003b15\") " Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.492413 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/397f040a-a433-4049-8b8e-fcd35c003b15-combined-ca-bundle\") pod \"397f040a-a433-4049-8b8e-fcd35c003b15\" (UID: \"397f040a-a433-4049-8b8e-fcd35c003b15\") " Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.492484 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-config-data\") pod \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.492513 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtcgj\" (UniqueName: \"kubernetes.io/projected/db50dd06-d67c-468e-88de-6a8fb86bd1bd-kube-api-access-gtcgj\") pod \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 
00:55:26.492650 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-db-sync-config-data\") pod \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\" (UID: \"db50dd06-d67c-468e-88de-6a8fb86bd1bd\") " Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.499511 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/397f040a-a433-4049-8b8e-fcd35c003b15-kube-api-access-rrwkm" (OuterVolumeSpecName: "kube-api-access-rrwkm") pod "397f040a-a433-4049-8b8e-fcd35c003b15" (UID: "397f040a-a433-4049-8b8e-fcd35c003b15"). InnerVolumeSpecName "kube-api-access-rrwkm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.501932 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db50dd06-d67c-468e-88de-6a8fb86bd1bd-kube-api-access-gtcgj" (OuterVolumeSpecName: "kube-api-access-gtcgj") pod "db50dd06-d67c-468e-88de-6a8fb86bd1bd" (UID: "db50dd06-d67c-468e-88de-6a8fb86bd1bd"). InnerVolumeSpecName "kube-api-access-gtcgj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.523667 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "db50dd06-d67c-468e-88de-6a8fb86bd1bd" (UID: "db50dd06-d67c-468e-88de-6a8fb86bd1bd"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.530992 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "db50dd06-d67c-468e-88de-6a8fb86bd1bd" (UID: "db50dd06-d67c-468e-88de-6a8fb86bd1bd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.542473 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/397f040a-a433-4049-8b8e-fcd35c003b15-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "397f040a-a433-4049-8b8e-fcd35c003b15" (UID: "397f040a-a433-4049-8b8e-fcd35c003b15"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.542378 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/397f040a-a433-4049-8b8e-fcd35c003b15-config" (OuterVolumeSpecName: "config") pod "397f040a-a433-4049-8b8e-fcd35c003b15" (UID: "397f040a-a433-4049-8b8e-fcd35c003b15"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.570036 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-config-data" (OuterVolumeSpecName: "config-data") pod "db50dd06-d67c-468e-88de-6a8fb86bd1bd" (UID: "db50dd06-d67c-468e-88de-6a8fb86bd1bd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.596209 4884 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.596236 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrwkm\" (UniqueName: \"kubernetes.io/projected/397f040a-a433-4049-8b8e-fcd35c003b15-kube-api-access-rrwkm\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.596246 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.596255 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/397f040a-a433-4049-8b8e-fcd35c003b15-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.596263 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/397f040a-a433-4049-8b8e-fcd35c003b15-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.596271 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db50dd06-d67c-468e-88de-6a8fb86bd1bd-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:26 crc kubenswrapper[4884]: I1210 00:55:26.596279 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtcgj\" (UniqueName: \"kubernetes.io/projected/db50dd06-d67c-468e-88de-6a8fb86bd1bd-kube-api-access-gtcgj\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.295504 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" podUID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.136:5353: i/o timeout" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.301452 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.574423 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79cd4f6685-cshdh"] Dec 10 00:55:27 crc kubenswrapper[4884]: E1210 00:55:27.574864 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="397f040a-a433-4049-8b8e-fcd35c003b15" containerName="neutron-db-sync" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.574884 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="397f040a-a433-4049-8b8e-fcd35c003b15" containerName="neutron-db-sync" Dec 10 00:55:27 crc kubenswrapper[4884]: E1210 00:55:27.574911 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db50dd06-d67c-468e-88de-6a8fb86bd1bd" containerName="glance-db-sync" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.574918 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="db50dd06-d67c-468e-88de-6a8fb86bd1bd" containerName="glance-db-sync" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.575117 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="397f040a-a433-4049-8b8e-fcd35c003b15" 
containerName="neutron-db-sync" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.575133 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="db50dd06-d67c-468e-88de-6a8fb86bd1bd" containerName="glance-db-sync" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.578359 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.596453 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79cd4f6685-cshdh"] Dec 10 00:55:27 crc kubenswrapper[4884]: E1210 00:55:27.698662 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Dec 10 00:55:27 crc kubenswrapper[4884]: E1210 00:55:27.699162 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-flm5p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-brxjz_openstack(5aeda9c4-9011-47b9-8083-f0309ed8a010): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 10 
00:55:27 crc kubenswrapper[4884]: E1210 00:55:27.700842 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-brxjz" podUID="5aeda9c4-9011-47b9-8083-f0309ed8a010" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.718533 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-dns-swift-storage-0\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.718616 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-dns-svc\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.718644 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-config\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.718667 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67qlz\" (UniqueName: \"kubernetes.io/projected/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-kube-api-access-67qlz\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.718687 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-ovsdbserver-nb\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.718712 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-ovsdbserver-sb\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.733669 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.798125 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7c6d9d796d-n2w9w"] Dec 10 00:55:27 crc kubenswrapper[4884]: E1210 00:55:27.798623 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" containerName="dnsmasq-dns" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.798647 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" containerName="dnsmasq-dns" Dec 10 00:55:27 crc kubenswrapper[4884]: E1210 00:55:27.798669 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" containerName="init" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.798678 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" containerName="init" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.798904 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" containerName="dnsmasq-dns" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.803735 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.808984 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-5zlwb" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.809233 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.809509 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.809695 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.824762 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-combined-ca-bundle\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.824911 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-httpd-config\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.824962 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgm9s\" (UniqueName: \"kubernetes.io/projected/1604e8e1-2382-4aa4-b645-332d4c1c00b1-kube-api-access-jgm9s\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.824989 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-ovndb-tls-certs\") pod \"neutron-7c6d9d796d-n2w9w\" 
(UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.825026 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-dns-swift-storage-0\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.825074 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-config\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.825114 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-dns-svc\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.825150 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-config\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.825182 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67qlz\" (UniqueName: \"kubernetes.io/projected/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-kube-api-access-67qlz\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.825203 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-ovsdbserver-nb\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.825243 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-ovsdbserver-sb\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.825814 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7c6d9d796d-n2w9w"] Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.826086 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-ovsdbserver-sb\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.826504 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-dns-swift-storage-0\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.826647 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-dns-svc\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.827224 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-ovsdbserver-nb\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.827314 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-config\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.855684 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67qlz\" (UniqueName: \"kubernetes.io/projected/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-kube-api-access-67qlz\") pod \"dnsmasq-dns-79cd4f6685-cshdh\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.862919 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79cd4f6685-cshdh"] Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.863706 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.927107 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhwnl\" (UniqueName: \"kubernetes.io/projected/20a98971-47d1-49fe-b09b-84acaa9b4c6f-kube-api-access-bhwnl\") pod \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.927163 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-dns-svc\") pod \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.927186 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-ovsdbserver-sb\") pod \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.927258 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-ovsdbserver-nb\") pod \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.927336 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-config\") pod \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\" (UID: \"20a98971-47d1-49fe-b09b-84acaa9b4c6f\") " Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.927700 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-config\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.927796 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-combined-ca-bundle\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.927851 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-httpd-config\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.927887 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgm9s\" (UniqueName: \"kubernetes.io/projected/1604e8e1-2382-4aa4-b645-332d4c1c00b1-kube-api-access-jgm9s\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.927905 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-ovndb-tls-certs\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.933591 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-combined-ca-bundle\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.941040 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-httpd-config\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.941578 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20a98971-47d1-49fe-b09b-84acaa9b4c6f-kube-api-access-bhwnl" (OuterVolumeSpecName: "kube-api-access-bhwnl") pod "20a98971-47d1-49fe-b09b-84acaa9b4c6f" (UID: "20a98971-47d1-49fe-b09b-84acaa9b4c6f"). InnerVolumeSpecName "kube-api-access-bhwnl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.945171 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-gw7fg"] Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.948048 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.950973 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-config\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.957573 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgm9s\" (UniqueName: \"kubernetes.io/projected/1604e8e1-2382-4aa4-b645-332d4c1c00b1-kube-api-access-jgm9s\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.962213 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-gw7fg"] Dec 10 00:55:27 crc kubenswrapper[4884]: I1210 00:55:27.971905 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-ovndb-tls-certs\") pod \"neutron-7c6d9d796d-n2w9w\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") " pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.011212 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "20a98971-47d1-49fe-b09b-84acaa9b4c6f" (UID: "20a98971-47d1-49fe-b09b-84acaa9b4c6f"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.020213 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "20a98971-47d1-49fe-b09b-84acaa9b4c6f" (UID: "20a98971-47d1-49fe-b09b-84acaa9b4c6f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.027961 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "20a98971-47d1-49fe-b09b-84acaa9b4c6f" (UID: "20a98971-47d1-49fe-b09b-84acaa9b4c6f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.031122 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.031161 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhwnl\" (UniqueName: \"kubernetes.io/projected/20a98971-47d1-49fe-b09b-84acaa9b4c6f-kube-api-access-bhwnl\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.031176 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.031186 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.045716 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-config" (OuterVolumeSpecName: "config") pod "20a98971-47d1-49fe-b09b-84acaa9b4c6f" (UID: "20a98971-47d1-49fe-b09b-84acaa9b4c6f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.137553 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.138701 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-config\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.138738 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-dns-svc\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.138970 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhhv7\" (UniqueName: \"kubernetes.io/projected/3d53bd28-0069-4228-a8dd-e0f065a80bb6-kube-api-access-hhhv7\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.139105 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.139238 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.139312 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.139474 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20a98971-47d1-49fe-b09b-84acaa9b4c6f-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.241231 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.241320 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 
00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.241346 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.241426 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-config\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.241467 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-dns-svc\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.241851 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhhv7\" (UniqueName: \"kubernetes.io/projected/3d53bd28-0069-4228-a8dd-e0f065a80bb6-kube-api-access-hhhv7\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.243835 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.244552 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.244662 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.244743 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-config\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.245593 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-dns-svc\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.257740 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-hhhv7\" (UniqueName: \"kubernetes.io/projected/3d53bd28-0069-4228-a8dd-e0f065a80bb6-kube-api-access-hhhv7\") pod \"dnsmasq-dns-6b7b667979-gw7fg\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.366168 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.492574 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.492573 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" event={"ID":"20a98971-47d1-49fe-b09b-84acaa9b4c6f","Type":"ContainerDied","Data":"c8aeefece315aa386c865813cc08d7fb49694b7dd73f37f834162e36c012a2d1"} Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.492868 4884 scope.go:117] "RemoveContainer" containerID="2913a7ae2ba1c8a32103b08466fd0c5b5f862ef57ed46214644ecbc07a71aa9a" Dec 10 00:55:28 crc kubenswrapper[4884]: E1210 00:55:28.500274 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-brxjz" podUID="5aeda9c4-9011-47b9-8083-f0309ed8a010" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.545876 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-drtcs"] Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.558501 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-drtcs"] Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.568495 4884 scope.go:117] "RemoveContainer" containerID="aeb7b564e24d5cfae85a54d26d75dea336db4620f5dd8a333580ed56343f978d" Dec 10 00:55:28 crc kubenswrapper[4884]: I1210 00:55:28.992165 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hr4wb"] Dec 10 00:55:29 crc kubenswrapper[4884]: W1210 00:55:29.004958 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6943c1e6_d468_45b6_a2f7_831a6961e64a.slice/crio-873bd9afba2bea17a8fdd368449a1bf8ee7b66c7c2307b3c1051ea36f4e692f6 WatchSource:0}: Error finding container 873bd9afba2bea17a8fdd368449a1bf8ee7b66c7c2307b3c1051ea36f4e692f6: Status 404 returned error can't find the container with id 873bd9afba2bea17a8fdd368449a1bf8ee7b66c7c2307b3c1051ea36f4e692f6 Dec 10 00:55:29 crc kubenswrapper[4884]: I1210 00:55:29.009721 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 10 00:55:29 crc kubenswrapper[4884]: I1210 00:55:29.143997 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-gw7fg"] Dec 10 00:55:29 crc kubenswrapper[4884]: I1210 00:55:29.162299 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79cd4f6685-cshdh"] Dec 10 00:55:29 crc kubenswrapper[4884]: W1210 00:55:29.167535 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2625d27e_71f7_4b5d_a2af_175f9f9d27cf.slice/crio-ca03584ca3797dcf618f1a72f41c8923805d1bf82dbf8d7ff58b10c2cb4cf638 WatchSource:0}: Error finding container 
ca03584ca3797dcf618f1a72f41c8923805d1bf82dbf8d7ff58b10c2cb4cf638: Status 404 returned error can't find the container with id ca03584ca3797dcf618f1a72f41c8923805d1bf82dbf8d7ff58b10c2cb4cf638 Dec 10 00:55:29 crc kubenswrapper[4884]: I1210 00:55:29.231380 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7c6d9d796d-n2w9w"] Dec 10 00:55:29 crc kubenswrapper[4884]: I1210 00:55:29.311773 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" path="/var/lib/kubelet/pods/20a98971-47d1-49fe-b09b-84acaa9b4c6f/volumes" Dec 10 00:55:29 crc kubenswrapper[4884]: I1210 00:55:29.522885 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" event={"ID":"2625d27e-71f7-4b5d-a2af-175f9f9d27cf","Type":"ContainerStarted","Data":"ca03584ca3797dcf618f1a72f41c8923805d1bf82dbf8d7ff58b10c2cb4cf638"} Dec 10 00:55:29 crc kubenswrapper[4884]: I1210 00:55:29.532920 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" event={"ID":"3d53bd28-0069-4228-a8dd-e0f065a80bb6","Type":"ContainerStarted","Data":"ddfb7d392e4fec47a04ab6efa1e3356468fd5b221ff99f344a672ce107ecf095"} Dec 10 00:55:29 crc kubenswrapper[4884]: I1210 00:55:29.543068 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hr4wb" event={"ID":"6943c1e6-d468-45b6-a2f7-831a6961e64a","Type":"ContainerStarted","Data":"873bd9afba2bea17a8fdd368449a1bf8ee7b66c7c2307b3c1051ea36f4e692f6"} Dec 10 00:55:29 crc kubenswrapper[4884]: I1210 00:55:29.554420 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7c6d9d796d-n2w9w" event={"ID":"1604e8e1-2382-4aa4-b645-332d4c1c00b1","Type":"ContainerStarted","Data":"472a3b9222b9953e133d9f48d487da9eef1ea8d8a3a3af82e825c5e764db4f49"} Dec 10 00:55:29 crc kubenswrapper[4884]: I1210 00:55:29.576281 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-hr4wb" podStartSLOduration=10.576262554 podStartE2EDuration="10.576262554s" podCreationTimestamp="2025-12-10 00:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:29.561272208 +0000 UTC m=+1502.639229325" watchObservedRunningTime="2025-12-10 00:55:29.576262554 +0000 UTC m=+1502.654219671" Dec 10 00:55:30 crc kubenswrapper[4884]: I1210 00:55:30.576528 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hr4wb" event={"ID":"6943c1e6-d468-45b6-a2f7-831a6961e64a","Type":"ContainerStarted","Data":"09701bd735e5ab5cb4809ac7a4f35b50a301d0acf567f2f46ba54eaa6943d1a3"} Dec 10 00:55:30 crc kubenswrapper[4884]: I1210 00:55:30.581792 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7c6d9d796d-n2w9w" event={"ID":"1604e8e1-2382-4aa4-b645-332d4c1c00b1","Type":"ContainerStarted","Data":"58071ff80b4d2d77aeda25b3bc4d5e8a137b5952afc30e3397a96bfec551af6e"} Dec 10 00:55:30 crc kubenswrapper[4884]: I1210 00:55:30.581856 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7c6d9d796d-n2w9w" event={"ID":"1604e8e1-2382-4aa4-b645-332d4c1c00b1","Type":"ContainerStarted","Data":"99e32a4abaf2fd173eee66ec1de614db4a9ee006b9e7358e15f01402dca40202"} Dec 10 00:55:30 crc kubenswrapper[4884]: I1210 00:55:30.581922 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:30 crc 
kubenswrapper[4884]: I1210 00:55:30.585356 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"341036d7-5d9f-493a-b043-11d6517c390d","Type":"ContainerStarted","Data":"da117cab1679c3c345de0912eb6f653eafe1b21c86828fb9318b71dd52095381"} Dec 10 00:55:30 crc kubenswrapper[4884]: I1210 00:55:30.588777 4884 generic.go:334] "Generic (PLEG): container finished" podID="2625d27e-71f7-4b5d-a2af-175f9f9d27cf" containerID="d56ea3f10c004014a9164cc51d27c6edab706b399f0cf0ad3d5a6de9fc908c1a" exitCode=0 Dec 10 00:55:30 crc kubenswrapper[4884]: I1210 00:55:30.588856 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" event={"ID":"2625d27e-71f7-4b5d-a2af-175f9f9d27cf","Type":"ContainerDied","Data":"d56ea3f10c004014a9164cc51d27c6edab706b399f0cf0ad3d5a6de9fc908c1a"} Dec 10 00:55:30 crc kubenswrapper[4884]: I1210 00:55:30.591209 4884 generic.go:334] "Generic (PLEG): container finished" podID="3d53bd28-0069-4228-a8dd-e0f065a80bb6" containerID="582e1e7e5f16e9efe27f36d54d6a8dd1682fddfd64100ac5ba7e44b064c3bea0" exitCode=0 Dec 10 00:55:30 crc kubenswrapper[4884]: I1210 00:55:30.591243 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" event={"ID":"3d53bd28-0069-4228-a8dd-e0f065a80bb6","Type":"ContainerDied","Data":"582e1e7e5f16e9efe27f36d54d6a8dd1682fddfd64100ac5ba7e44b064c3bea0"} Dec 10 00:55:30 crc kubenswrapper[4884]: I1210 00:55:30.605352 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7c6d9d796d-n2w9w" podStartSLOduration=3.605337173 podStartE2EDuration="3.605337173s" podCreationTimestamp="2025-12-10 00:55:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:30.599821673 +0000 UTC m=+1503.677778790" watchObservedRunningTime="2025-12-10 00:55:30.605337173 +0000 UTC m=+1503.683294290" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.040214 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.197476 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-dns-swift-storage-0\") pod \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.197655 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-ovsdbserver-sb\") pod \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.197686 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-dns-svc\") pod \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.197718 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-ovsdbserver-nb\") pod \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.197751 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-config\") pod \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.197848 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67qlz\" (UniqueName: \"kubernetes.io/projected/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-kube-api-access-67qlz\") pod \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\" (UID: \"2625d27e-71f7-4b5d-a2af-175f9f9d27cf\") " Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.203784 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-kube-api-access-67qlz" (OuterVolumeSpecName: "kube-api-access-67qlz") pod "2625d27e-71f7-4b5d-a2af-175f9f9d27cf" (UID: "2625d27e-71f7-4b5d-a2af-175f9f9d27cf"). InnerVolumeSpecName "kube-api-access-67qlz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.228358 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-config" (OuterVolumeSpecName: "config") pod "2625d27e-71f7-4b5d-a2af-175f9f9d27cf" (UID: "2625d27e-71f7-4b5d-a2af-175f9f9d27cf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.229207 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2625d27e-71f7-4b5d-a2af-175f9f9d27cf" (UID: "2625d27e-71f7-4b5d-a2af-175f9f9d27cf"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.231828 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2625d27e-71f7-4b5d-a2af-175f9f9d27cf" (UID: "2625d27e-71f7-4b5d-a2af-175f9f9d27cf"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.240581 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2625d27e-71f7-4b5d-a2af-175f9f9d27cf" (UID: "2625d27e-71f7-4b5d-a2af-175f9f9d27cf"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.250060 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2625d27e-71f7-4b5d-a2af-175f9f9d27cf" (UID: "2625d27e-71f7-4b5d-a2af-175f9f9d27cf"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.300510 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.300868 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.300884 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.300899 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.300915 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67qlz\" (UniqueName: \"kubernetes.io/projected/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-kube-api-access-67qlz\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.300949 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2625d27e-71f7-4b5d-a2af-175f9f9d27cf-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.608410 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" event={"ID":"2625d27e-71f7-4b5d-a2af-175f9f9d27cf","Type":"ContainerDied","Data":"ca03584ca3797dcf618f1a72f41c8923805d1bf82dbf8d7ff58b10c2cb4cf638"} Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.608469 4884 scope.go:117] "RemoveContainer" containerID="d56ea3f10c004014a9164cc51d27c6edab706b399f0cf0ad3d5a6de9fc908c1a" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.608582 
4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79cd4f6685-cshdh" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.611556 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" event={"ID":"3d53bd28-0069-4228-a8dd-e0f065a80bb6","Type":"ContainerStarted","Data":"662cdfc7178497f3ca9035f4105b066b98bed00449bc4aaed874f1bd12f203df"} Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.611796 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.642570 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" podStartSLOduration=4.642543681 podStartE2EDuration="4.642543681s" podCreationTimestamp="2025-12-10 00:55:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:31.635260633 +0000 UTC m=+1504.713217750" watchObservedRunningTime="2025-12-10 00:55:31.642543681 +0000 UTC m=+1504.720500798" Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.710644 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79cd4f6685-cshdh"] Dec 10 00:55:31 crc kubenswrapper[4884]: I1210 00:55:31.719260 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79cd4f6685-cshdh"] Dec 10 00:55:32 crc kubenswrapper[4884]: I1210 00:55:32.301800 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-drtcs" podUID="20a98971-47d1-49fe-b09b-84acaa9b4c6f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.136:5353: i/o timeout" Dec 10 00:55:32 crc kubenswrapper[4884]: I1210 00:55:32.624661 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-w7clk" event={"ID":"0b7ea171-752a-4d11-b544-d5c69f602dd0","Type":"ContainerStarted","Data":"815e6501db560851dd335dc8262103f42f8bd1676a002e7d0597ca529220a1a3"} Dec 10 00:55:32 crc kubenswrapper[4884]: I1210 00:55:32.646187 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-w7clk" podStartSLOduration=3.828775881 podStartE2EDuration="34.64616848s" podCreationTimestamp="2025-12-10 00:54:58 +0000 UTC" firstStartedPulling="2025-12-10 00:55:01.24422377 +0000 UTC m=+1474.322180887" lastFinishedPulling="2025-12-10 00:55:32.061616369 +0000 UTC m=+1505.139573486" observedRunningTime="2025-12-10 00:55:32.642502731 +0000 UTC m=+1505.720459848" watchObservedRunningTime="2025-12-10 00:55:32.64616848 +0000 UTC m=+1505.724125597" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.302299 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2625d27e-71f7-4b5d-a2af-175f9f9d27cf" path="/var/lib/kubelet/pods/2625d27e-71f7-4b5d-a2af-175f9f9d27cf/volumes" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.640133 4884 generic.go:334] "Generic (PLEG): container finished" podID="6943c1e6-d468-45b6-a2f7-831a6961e64a" containerID="09701bd735e5ab5cb4809ac7a4f35b50a301d0acf567f2f46ba54eaa6943d1a3" exitCode=0 Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.640198 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hr4wb" 
event={"ID":"6943c1e6-d468-45b6-a2f7-831a6961e64a","Type":"ContainerDied","Data":"09701bd735e5ab5cb4809ac7a4f35b50a301d0acf567f2f46ba54eaa6943d1a3"} Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.785168 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7656cd6689-46nqx"] Dec 10 00:55:33 crc kubenswrapper[4884]: E1210 00:55:33.785704 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2625d27e-71f7-4b5d-a2af-175f9f9d27cf" containerName="init" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.785730 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2625d27e-71f7-4b5d-a2af-175f9f9d27cf" containerName="init" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.786031 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2625d27e-71f7-4b5d-a2af-175f9f9d27cf" containerName="init" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.787402 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.789574 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.791813 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.802549 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7656cd6689-46nqx"] Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.855488 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmzb5\" (UniqueName: \"kubernetes.io/projected/f5135453-5688-4022-b536-cdd8c5e62926-kube-api-access-hmzb5\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.856043 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-httpd-config\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.856150 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-config\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.856171 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-ovndb-tls-certs\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.856192 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-internal-tls-certs\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 
00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.856221 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-public-tls-certs\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.856372 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-combined-ca-bundle\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.958546 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmzb5\" (UniqueName: \"kubernetes.io/projected/f5135453-5688-4022-b536-cdd8c5e62926-kube-api-access-hmzb5\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.958606 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-httpd-config\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.958741 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-config\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.959831 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-ovndb-tls-certs\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.959867 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-internal-tls-certs\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.959909 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-public-tls-certs\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.959937 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-combined-ca-bundle\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.967520 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-config\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.967561 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-internal-tls-certs\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.967753 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-httpd-config\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.968586 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-combined-ca-bundle\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.969199 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-ovndb-tls-certs\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.972308 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5135453-5688-4022-b536-cdd8c5e62926-public-tls-certs\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:33 crc kubenswrapper[4884]: I1210 00:55:33.983934 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmzb5\" (UniqueName: \"kubernetes.io/projected/f5135453-5688-4022-b536-cdd8c5e62926-kube-api-access-hmzb5\") pod \"neutron-7656cd6689-46nqx\" (UID: \"f5135453-5688-4022-b536-cdd8c5e62926\") " pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:34 crc kubenswrapper[4884]: I1210 00:55:34.126920 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:34 crc kubenswrapper[4884]: I1210 00:55:34.806563 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7656cd6689-46nqx"] Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.066979 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.213382 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-config-data\") pod \"6943c1e6-d468-45b6-a2f7-831a6961e64a\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.213510 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-combined-ca-bundle\") pod \"6943c1e6-d468-45b6-a2f7-831a6961e64a\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.213662 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jh4kr\" (UniqueName: \"kubernetes.io/projected/6943c1e6-d468-45b6-a2f7-831a6961e64a-kube-api-access-jh4kr\") pod \"6943c1e6-d468-45b6-a2f7-831a6961e64a\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.213697 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-fernet-keys\") pod \"6943c1e6-d468-45b6-a2f7-831a6961e64a\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.213742 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-credential-keys\") pod \"6943c1e6-d468-45b6-a2f7-831a6961e64a\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.213760 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-scripts\") pod \"6943c1e6-d468-45b6-a2f7-831a6961e64a\" (UID: \"6943c1e6-d468-45b6-a2f7-831a6961e64a\") " Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.221570 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6943c1e6-d468-45b6-a2f7-831a6961e64a-kube-api-access-jh4kr" (OuterVolumeSpecName: "kube-api-access-jh4kr") pod "6943c1e6-d468-45b6-a2f7-831a6961e64a" (UID: "6943c1e6-d468-45b6-a2f7-831a6961e64a"). InnerVolumeSpecName "kube-api-access-jh4kr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.222841 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6943c1e6-d468-45b6-a2f7-831a6961e64a" (UID: "6943c1e6-d468-45b6-a2f7-831a6961e64a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.236019 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-scripts" (OuterVolumeSpecName: "scripts") pod "6943c1e6-d468-45b6-a2f7-831a6961e64a" (UID: "6943c1e6-d468-45b6-a2f7-831a6961e64a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.237658 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6943c1e6-d468-45b6-a2f7-831a6961e64a" (UID: "6943c1e6-d468-45b6-a2f7-831a6961e64a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.249657 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-config-data" (OuterVolumeSpecName: "config-data") pod "6943c1e6-d468-45b6-a2f7-831a6961e64a" (UID: "6943c1e6-d468-45b6-a2f7-831a6961e64a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.258587 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6943c1e6-d468-45b6-a2f7-831a6961e64a" (UID: "6943c1e6-d468-45b6-a2f7-831a6961e64a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.315555 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jh4kr\" (UniqueName: \"kubernetes.io/projected/6943c1e6-d468-45b6-a2f7-831a6961e64a-kube-api-access-jh4kr\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.315596 4884 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.315609 4884 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.315622 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.315632 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.315643 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6943c1e6-d468-45b6-a2f7-831a6961e64a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.678295 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hr4wb" event={"ID":"6943c1e6-d468-45b6-a2f7-831a6961e64a","Type":"ContainerDied","Data":"873bd9afba2bea17a8fdd368449a1bf8ee7b66c7c2307b3c1051ea36f4e692f6"} Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.678611 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="873bd9afba2bea17a8fdd368449a1bf8ee7b66c7c2307b3c1051ea36f4e692f6" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.678673 4884 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hr4wb" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.681144 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7656cd6689-46nqx" event={"ID":"f5135453-5688-4022-b536-cdd8c5e62926","Type":"ContainerStarted","Data":"df0cb02dac2c1107f9f4c5c30741e005d89e07a359621655288322f8368e8ae2"} Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.793758 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-78b6b775cc-bbrl7"] Dec 10 00:55:35 crc kubenswrapper[4884]: E1210 00:55:35.794447 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6943c1e6-d468-45b6-a2f7-831a6961e64a" containerName="keystone-bootstrap" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.794568 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6943c1e6-d468-45b6-a2f7-831a6961e64a" containerName="keystone-bootstrap" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.794880 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6943c1e6-d468-45b6-a2f7-831a6961e64a" containerName="keystone-bootstrap" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.795745 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.805212 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.805841 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.806026 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.806141 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.806246 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-8z4pv" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.808061 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.854933 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-78b6b775cc-bbrl7"] Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.925636 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-fernet-keys\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.925693 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzb5f\" (UniqueName: \"kubernetes.io/projected/c10e9f86-3844-4b7e-954f-a2f868c86b35-kube-api-access-rzb5f\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.925711 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-credential-keys\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.925769 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-internal-tls-certs\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.925835 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-config-data\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.925876 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-scripts\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.925918 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-public-tls-certs\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:35 crc kubenswrapper[4884]: I1210 00:55:35.925940 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-combined-ca-bundle\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.027562 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-config-data\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.027911 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-scripts\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.028040 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-public-tls-certs\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.028167 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-combined-ca-bundle\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.028310 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-fernet-keys\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.028417 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzb5f\" (UniqueName: \"kubernetes.io/projected/c10e9f86-3844-4b7e-954f-a2f868c86b35-kube-api-access-rzb5f\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.028616 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-credential-keys\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.028766 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-internal-tls-certs\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.033454 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-internal-tls-certs\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.033970 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-scripts\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.034595 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-public-tls-certs\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.034709 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-fernet-keys\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.034712 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-credential-keys\") pod \"keystone-78b6b775cc-bbrl7\" (UID: 
\"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.034863 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-combined-ca-bundle\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.035768 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c10e9f86-3844-4b7e-954f-a2f868c86b35-config-data\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.063048 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzb5f\" (UniqueName: \"kubernetes.io/projected/c10e9f86-3844-4b7e-954f-a2f868c86b35-kube-api-access-rzb5f\") pod \"keystone-78b6b775cc-bbrl7\" (UID: \"c10e9f86-3844-4b7e-954f-a2f868c86b35\") " pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.113261 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.592241 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-78b6b775cc-bbrl7"] Dec 10 00:55:36 crc kubenswrapper[4884]: I1210 00:55:36.697073 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7656cd6689-46nqx" event={"ID":"f5135453-5688-4022-b536-cdd8c5e62926","Type":"ContainerStarted","Data":"a2ceee5ac60f8a3344741844cee19a5bb070af20905b6b8d1cb1a52fbb10e066"} Dec 10 00:55:37 crc kubenswrapper[4884]: I1210 00:55:37.708038 4884 generic.go:334] "Generic (PLEG): container finished" podID="0b7ea171-752a-4d11-b544-d5c69f602dd0" containerID="815e6501db560851dd335dc8262103f42f8bd1676a002e7d0597ca529220a1a3" exitCode=0 Dec 10 00:55:37 crc kubenswrapper[4884]: I1210 00:55:37.708098 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-w7clk" event={"ID":"0b7ea171-752a-4d11-b544-d5c69f602dd0","Type":"ContainerDied","Data":"815e6501db560851dd335dc8262103f42f8bd1676a002e7d0597ca529220a1a3"} Dec 10 00:55:38 crc kubenswrapper[4884]: I1210 00:55:38.368096 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:55:38 crc kubenswrapper[4884]: I1210 00:55:38.435203 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-dg99w"] Dec 10 00:55:38 crc kubenswrapper[4884]: I1210 00:55:38.435451 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cf78879c9-dg99w" podUID="d0acf48a-d696-430d-8e7e-e92de923c9be" containerName="dnsmasq-dns" containerID="cri-o://2999aa7691d4898c99efc6e61802a4174aad4843c559e9ba740cb505b7752a2d" gracePeriod=10 Dec 10 00:55:38 crc kubenswrapper[4884]: I1210 00:55:38.723749 4884 generic.go:334] "Generic (PLEG): container finished" podID="d0acf48a-d696-430d-8e7e-e92de923c9be" containerID="2999aa7691d4898c99efc6e61802a4174aad4843c559e9ba740cb505b7752a2d" exitCode=0 Dec 10 00:55:38 crc kubenswrapper[4884]: I1210 00:55:38.723814 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-cf78879c9-dg99w" event={"ID":"d0acf48a-d696-430d-8e7e-e92de923c9be","Type":"ContainerDied","Data":"2999aa7691d4898c99efc6e61802a4174aad4843c559e9ba740cb505b7752a2d"} Dec 10 00:55:39 crc kubenswrapper[4884]: I1210 00:55:39.149957 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cf78879c9-dg99w" podUID="d0acf48a-d696-430d-8e7e-e92de923c9be" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.172:5353: connect: connection refused" Dec 10 00:55:41 crc kubenswrapper[4884]: W1210 00:55:41.706261 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc10e9f86_3844_4b7e_954f_a2f868c86b35.slice/crio-bf68afb2fd04e2df65349e81b3628b4b68cc39334518f728bd99618f09b17c38 WatchSource:0}: Error finding container bf68afb2fd04e2df65349e81b3628b4b68cc39334518f728bd99618f09b17c38: Status 404 returned error can't find the container with id bf68afb2fd04e2df65349e81b3628b4b68cc39334518f728bd99618f09b17c38 Dec 10 00:55:41 crc kubenswrapper[4884]: I1210 00:55:41.784702 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-78b6b775cc-bbrl7" event={"ID":"c10e9f86-3844-4b7e-954f-a2f868c86b35","Type":"ContainerStarted","Data":"bf68afb2fd04e2df65349e81b3628b4b68cc39334518f728bd99618f09b17c38"} Dec 10 00:55:41 crc kubenswrapper[4884]: I1210 00:55:41.799927 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-w7clk" event={"ID":"0b7ea171-752a-4d11-b544-d5c69f602dd0","Type":"ContainerDied","Data":"0363f38f6603f1729b31e59ac2ac6a42f4ca2a34bf4c8965008b966c5ff0fb27"} Dec 10 00:55:41 crc kubenswrapper[4884]: I1210 00:55:41.799976 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0363f38f6603f1729b31e59ac2ac6a42f4ca2a34bf4c8965008b966c5ff0fb27" Dec 10 00:55:41 crc kubenswrapper[4884]: I1210 00:55:41.926590 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-w7clk" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.078887 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-scripts\") pod \"0b7ea171-752a-4d11-b544-d5c69f602dd0\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.078976 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-combined-ca-bundle\") pod \"0b7ea171-752a-4d11-b544-d5c69f602dd0\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.079028 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-config-data\") pod \"0b7ea171-752a-4d11-b544-d5c69f602dd0\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.079121 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxw66\" (UniqueName: \"kubernetes.io/projected/0b7ea171-752a-4d11-b544-d5c69f602dd0-kube-api-access-xxw66\") pod \"0b7ea171-752a-4d11-b544-d5c69f602dd0\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.079181 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b7ea171-752a-4d11-b544-d5c69f602dd0-logs\") pod \"0b7ea171-752a-4d11-b544-d5c69f602dd0\" (UID: \"0b7ea171-752a-4d11-b544-d5c69f602dd0\") " Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.079991 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b7ea171-752a-4d11-b544-d5c69f602dd0-logs" (OuterVolumeSpecName: "logs") pod "0b7ea171-752a-4d11-b544-d5c69f602dd0" (UID: "0b7ea171-752a-4d11-b544-d5c69f602dd0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.092013 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-scripts" (OuterVolumeSpecName: "scripts") pod "0b7ea171-752a-4d11-b544-d5c69f602dd0" (UID: "0b7ea171-752a-4d11-b544-d5c69f602dd0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.093879 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b7ea171-752a-4d11-b544-d5c69f602dd0-kube-api-access-xxw66" (OuterVolumeSpecName: "kube-api-access-xxw66") pod "0b7ea171-752a-4d11-b544-d5c69f602dd0" (UID: "0b7ea171-752a-4d11-b544-d5c69f602dd0"). InnerVolumeSpecName "kube-api-access-xxw66". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.132759 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0b7ea171-752a-4d11-b544-d5c69f602dd0" (UID: "0b7ea171-752a-4d11-b544-d5c69f602dd0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.162032 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-dg99w" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.181521 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-config-data" (OuterVolumeSpecName: "config-data") pod "0b7ea171-752a-4d11-b544-d5c69f602dd0" (UID: "0b7ea171-752a-4d11-b544-d5c69f602dd0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.181973 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b7ea171-752a-4d11-b544-d5c69f602dd0-logs\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.181996 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.182005 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.182015 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b7ea171-752a-4d11-b544-d5c69f602dd0-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.182023 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxw66\" (UniqueName: \"kubernetes.io/projected/0b7ea171-752a-4d11-b544-d5c69f602dd0-kube-api-access-xxw66\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.283608 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-config\") pod \"d0acf48a-d696-430d-8e7e-e92de923c9be\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.283653 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-ovsdbserver-nb\") pod \"d0acf48a-d696-430d-8e7e-e92de923c9be\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.283681 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-dns-swift-storage-0\") pod \"d0acf48a-d696-430d-8e7e-e92de923c9be\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.283767 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-dns-svc\") pod \"d0acf48a-d696-430d-8e7e-e92de923c9be\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.283792 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" 
(UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-ovsdbserver-sb\") pod \"d0acf48a-d696-430d-8e7e-e92de923c9be\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.283917 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wncl4\" (UniqueName: \"kubernetes.io/projected/d0acf48a-d696-430d-8e7e-e92de923c9be-kube-api-access-wncl4\") pod \"d0acf48a-d696-430d-8e7e-e92de923c9be\" (UID: \"d0acf48a-d696-430d-8e7e-e92de923c9be\") " Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.287740 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0acf48a-d696-430d-8e7e-e92de923c9be-kube-api-access-wncl4" (OuterVolumeSpecName: "kube-api-access-wncl4") pod "d0acf48a-d696-430d-8e7e-e92de923c9be" (UID: "d0acf48a-d696-430d-8e7e-e92de923c9be"). InnerVolumeSpecName "kube-api-access-wncl4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.328850 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d0acf48a-d696-430d-8e7e-e92de923c9be" (UID: "d0acf48a-d696-430d-8e7e-e92de923c9be"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.337498 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d0acf48a-d696-430d-8e7e-e92de923c9be" (UID: "d0acf48a-d696-430d-8e7e-e92de923c9be"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.340535 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-config" (OuterVolumeSpecName: "config") pod "d0acf48a-d696-430d-8e7e-e92de923c9be" (UID: "d0acf48a-d696-430d-8e7e-e92de923c9be"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.345247 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d0acf48a-d696-430d-8e7e-e92de923c9be" (UID: "d0acf48a-d696-430d-8e7e-e92de923c9be"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.355909 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d0acf48a-d696-430d-8e7e-e92de923c9be" (UID: "d0acf48a-d696-430d-8e7e-e92de923c9be"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.386642 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.386670 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.386683 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.386694 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.386703 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d0acf48a-d696-430d-8e7e-e92de923c9be-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.386809 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wncl4\" (UniqueName: \"kubernetes.io/projected/d0acf48a-d696-430d-8e7e-e92de923c9be-kube-api-access-wncl4\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.811311 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-fkf86" event={"ID":"da3a231b-de19-4217-8b1b-54d40e56f0c3","Type":"ContainerStarted","Data":"6e9e119f5965300ccc87f06e72653e87bd5b2f5f70c17279cb22ac8c531221d6"} Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.813911 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7656cd6689-46nqx" event={"ID":"f5135453-5688-4022-b536-cdd8c5e62926","Type":"ContainerStarted","Data":"160784c71d4525b891b4fcbe5ec4a4e8f8fe2f62c2041fd02d7dcefab7945603"} Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.814069 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.816789 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"341036d7-5d9f-493a-b043-11d6517c390d","Type":"ContainerStarted","Data":"12a903484f96f6a787ada5646f4c6141f58587a1f62c000387897987f9abaa29"} Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.818379 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-slxfj" event={"ID":"175b7d23-6c78-4a15-9f04-f40b83d3a932","Type":"ContainerStarted","Data":"ddd6ed187686dbe6e3b919b1253d8dca9746e67fda2228465e9360af596653c3"} Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.820163 4884 util.go:48] "No ready sandbox for pod can be found. 
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.820163 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-dg99w"
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.820183 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-dg99w" event={"ID":"d0acf48a-d696-430d-8e7e-e92de923c9be","Type":"ContainerDied","Data":"fe01c055399e0efd2e8d351e7cef85dda4cfa2c06a6d31dbbfcbac58bb90cc8b"}
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.820225 4884 scope.go:117] "RemoveContainer" containerID="2999aa7691d4898c99efc6e61802a4174aad4843c559e9ba740cb505b7752a2d"
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.821759 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-78b6b775cc-bbrl7" event={"ID":"c10e9f86-3844-4b7e-954f-a2f868c86b35","Type":"ContainerStarted","Data":"f1249f04c245552d2b71771b205c276b455a2d107dc039d1396f1b605a6659ac"}
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.821799 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-w7clk"
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.821861 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-78b6b775cc-bbrl7"
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.844104 4884 scope.go:117] "RemoveContainer" containerID="dc8e5b214528dda8fbcdbd2860657c9f1545b2d81a0ce8aa1257ca78bd491600"
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.845324 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-fkf86" podStartSLOduration=3.918752396 podStartE2EDuration="44.845301761s" podCreationTimestamp="2025-12-10 00:54:58 +0000 UTC" firstStartedPulling="2025-12-10 00:55:00.993975193 +0000 UTC m=+1474.071932310" lastFinishedPulling="2025-12-10 00:55:41.920524558 +0000 UTC m=+1514.998481675" observedRunningTime="2025-12-10 00:55:42.833195382 +0000 UTC m=+1515.911152509" watchObservedRunningTime="2025-12-10 00:55:42.845301761 +0000 UTC m=+1515.923258888"
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.863280 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-slxfj" podStartSLOduration=2.9236998400000003 podStartE2EDuration="44.863260247s" podCreationTimestamp="2025-12-10 00:54:58 +0000 UTC" firstStartedPulling="2025-12-10 00:55:00.08953368 +0000 UTC m=+1473.167490797" lastFinishedPulling="2025-12-10 00:55:42.029094087 +0000 UTC m=+1515.107051204" observedRunningTime="2025-12-10 00:55:42.852414893 +0000 UTC m=+1515.930372020" watchObservedRunningTime="2025-12-10 00:55:42.863260247 +0000 UTC m=+1515.941217354"
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.884894 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-78b6b775cc-bbrl7" podStartSLOduration=7.884874493 podStartE2EDuration="7.884874493s" podCreationTimestamp="2025-12-10 00:55:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:42.875581701 +0000 UTC m=+1515.953538818" watchObservedRunningTime="2025-12-10 00:55:42.884874493 +0000 UTC m=+1515.962831620"
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.900499 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7656cd6689-46nqx" podStartSLOduration=9.896932479 podStartE2EDuration="9.896932479s" podCreationTimestamp="2025-12-10 00:55:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:42.892277603 +0000 UTC m=+1515.970234750" watchObservedRunningTime="2025-12-10 00:55:42.896932479 +0000 UTC m=+1515.974889606"
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.927324 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-dg99w"]
Dec 10 00:55:42 crc kubenswrapper[4884]: I1210 00:55:42.934257 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-dg99w"]
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.047397 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-c95474448-hcdj6"]
Dec 10 00:55:43 crc kubenswrapper[4884]: E1210 00:55:43.048142 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0acf48a-d696-430d-8e7e-e92de923c9be" containerName="init"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.048319 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0acf48a-d696-430d-8e7e-e92de923c9be" containerName="init"
Dec 10 00:55:43 crc kubenswrapper[4884]: E1210 00:55:43.048487 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b7ea171-752a-4d11-b544-d5c69f602dd0" containerName="placement-db-sync"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.048625 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b7ea171-752a-4d11-b544-d5c69f602dd0" containerName="placement-db-sync"
Dec 10 00:55:43 crc kubenswrapper[4884]: E1210 00:55:43.048704 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0acf48a-d696-430d-8e7e-e92de923c9be" containerName="dnsmasq-dns"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.048800 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0acf48a-d696-430d-8e7e-e92de923c9be" containerName="dnsmasq-dns"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.049155 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0acf48a-d696-430d-8e7e-e92de923c9be" containerName="dnsmasq-dns"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.049255 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b7ea171-752a-4d11-b544-d5c69f602dd0" containerName="placement-db-sync"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.051983 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.055231 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-76ssl"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.055495 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.055865 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.056306 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.057245 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.062617 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c95474448-hcdj6"]
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.203401 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-scripts\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.203717 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-config-data\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.203846 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-public-tls-certs\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.203961 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86fk5\" (UniqueName: \"kubernetes.io/projected/fb9dbf85-546d-418a-a001-c75e5817e1b7-kube-api-access-86fk5\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.204341 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb9dbf85-546d-418a-a001-c75e5817e1b7-logs\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.204401 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-internal-tls-certs\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.204459 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-combined-ca-bundle\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.298897 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0acf48a-d696-430d-8e7e-e92de923c9be" path="/var/lib/kubelet/pods/d0acf48a-d696-430d-8e7e-e92de923c9be/volumes"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.306657 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-scripts\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.306737 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-config-data\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.306775 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-public-tls-certs\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.306807 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86fk5\" (UniqueName: \"kubernetes.io/projected/fb9dbf85-546d-418a-a001-c75e5817e1b7-kube-api-access-86fk5\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.306922 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb9dbf85-546d-418a-a001-c75e5817e1b7-logs\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.306952 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-internal-tls-certs\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.306981 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-combined-ca-bundle\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6"
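For the new placement pod, each of the seven volumes passes through three reconciler stages: "VerifyControllerAttachedVolume started" (reconciler_common.go:245), "MountVolume started" (reconciler_common.go:218), and "MountVolume.SetUp succeeded" (operation_generator.go:637), all within about 150 ms here. A minimal sketch that reports the furthest stage each volume reached, useful for spotting a volume stuck before SetUp; file path and structure are assumptions for illustration:

import re

# Stages in the order the reconciler logs them.
STAGES = ["VerifyControllerAttachedVolume started",
          "MountVolume started",
          "MountVolume.SetUp succeeded"]
vol_re = re.compile(r'volume \\"(?P<vol>[^\\]+)\\"')

progress = {}  # volume name -> index of highest stage observed
with open("kubelet.log") as f:
    for line in f:
        m = vol_re.search(line)
        if not m:
            continue
        for i, stage in enumerate(STAGES):
            if stage in line:
                progress[m["vol"]] = max(progress.get(m["vol"], -1), i)

for vol, i in sorted(progress.items()):
    print(f"{vol}: {STAGES[i]}")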
\"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6" Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.313811 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-scripts\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6" Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.315913 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-internal-tls-certs\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6" Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.317576 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-combined-ca-bundle\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6" Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.320140 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-config-data\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6" Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.327678 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb9dbf85-546d-418a-a001-c75e5817e1b7-public-tls-certs\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6" Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.339457 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86fk5\" (UniqueName: \"kubernetes.io/projected/fb9dbf85-546d-418a-a001-c75e5817e1b7-kube-api-access-86fk5\") pod \"placement-c95474448-hcdj6\" (UID: \"fb9dbf85-546d-418a-a001-c75e5817e1b7\") " pod="openstack/placement-c95474448-hcdj6" Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.406843 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-c95474448-hcdj6" Dec 10 00:55:43 crc kubenswrapper[4884]: I1210 00:55:43.906028 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c95474448-hcdj6"] Dec 10 00:55:43 crc kubenswrapper[4884]: W1210 00:55:43.920022 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb9dbf85_546d_418a_a001_c75e5817e1b7.slice/crio-d801177b14c9aae61b83d5040f6d5b27ad34bedc47c7996e810e3e8638eb10ff WatchSource:0}: Error finding container d801177b14c9aae61b83d5040f6d5b27ad34bedc47c7996e810e3e8638eb10ff: Status 404 returned error can't find the container with id d801177b14c9aae61b83d5040f6d5b27ad34bedc47c7996e810e3e8638eb10ff Dec 10 00:55:44 crc kubenswrapper[4884]: I1210 00:55:44.856214 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c95474448-hcdj6" event={"ID":"fb9dbf85-546d-418a-a001-c75e5817e1b7","Type":"ContainerStarted","Data":"b2dae6b21b3fa452b49ea1a558df85afdf1810f78acc1ebaec3b88b7bb4eb15f"} Dec 10 00:55:44 crc kubenswrapper[4884]: I1210 00:55:44.856609 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c95474448-hcdj6" event={"ID":"fb9dbf85-546d-418a-a001-c75e5817e1b7","Type":"ContainerStarted","Data":"4233bb43b87f425626c4d2b57fb4be1aef752174913b3f86062a89c09a0b7131"} Dec 10 00:55:44 crc kubenswrapper[4884]: I1210 00:55:44.856972 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-c95474448-hcdj6" Dec 10 00:55:44 crc kubenswrapper[4884]: I1210 00:55:44.856991 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c95474448-hcdj6" event={"ID":"fb9dbf85-546d-418a-a001-c75e5817e1b7","Type":"ContainerStarted","Data":"d801177b14c9aae61b83d5040f6d5b27ad34bedc47c7996e810e3e8638eb10ff"} Dec 10 00:55:44 crc kubenswrapper[4884]: I1210 00:55:44.857007 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-c95474448-hcdj6" Dec 10 00:55:44 crc kubenswrapper[4884]: I1210 00:55:44.865611 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-brxjz" event={"ID":"5aeda9c4-9011-47b9-8083-f0309ed8a010","Type":"ContainerStarted","Data":"f43e9d15724a5efbdbefd705bb1c4b951ac9e1badc4cfdfb817b0021ecb2bf27"} Dec 10 00:55:44 crc kubenswrapper[4884]: I1210 00:55:44.870348 4884 generic.go:334] "Generic (PLEG): container finished" podID="da3a231b-de19-4217-8b1b-54d40e56f0c3" containerID="6e9e119f5965300ccc87f06e72653e87bd5b2f5f70c17279cb22ac8c531221d6" exitCode=0 Dec 10 00:55:44 crc kubenswrapper[4884]: I1210 00:55:44.870406 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-fkf86" event={"ID":"da3a231b-de19-4217-8b1b-54d40e56f0c3","Type":"ContainerDied","Data":"6e9e119f5965300ccc87f06e72653e87bd5b2f5f70c17279cb22ac8c531221d6"} Dec 10 00:55:44 crc kubenswrapper[4884]: I1210 00:55:44.897359 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-c95474448-hcdj6" podStartSLOduration=1.897341481 podStartE2EDuration="1.897341481s" podCreationTimestamp="2025-12-10 00:55:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:44.876676972 +0000 UTC m=+1517.954634089" watchObservedRunningTime="2025-12-10 00:55:44.897341481 +0000 UTC m=+1517.975298598" Dec 10 00:55:44 crc kubenswrapper[4884]: I1210 00:55:44.900078 
Dec 10 00:55:44 crc kubenswrapper[4884]: I1210 00:55:44.900078 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-brxjz" podStartSLOduration=3.198234444 podStartE2EDuration="46.900068225s" podCreationTimestamp="2025-12-10 00:54:58 +0000 UTC" firstStartedPulling="2025-12-10 00:55:00.090655631 +0000 UTC m=+1473.168612748" lastFinishedPulling="2025-12-10 00:55:43.792489402 +0000 UTC m=+1516.870446529" observedRunningTime="2025-12-10 00:55:44.890808035 +0000 UTC m=+1517.968765162" watchObservedRunningTime="2025-12-10 00:55:44.900068225 +0000 UTC m=+1517.978025342"
Dec 10 00:55:45 crc kubenswrapper[4884]: I1210 00:55:45.885365 4884 generic.go:334] "Generic (PLEG): container finished" podID="175b7d23-6c78-4a15-9f04-f40b83d3a932" containerID="ddd6ed187686dbe6e3b919b1253d8dca9746e67fda2228465e9360af596653c3" exitCode=0
Dec 10 00:55:45 crc kubenswrapper[4884]: I1210 00:55:45.885456 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-slxfj" event={"ID":"175b7d23-6c78-4a15-9f04-f40b83d3a932","Type":"ContainerDied","Data":"ddd6ed187686dbe6e3b919b1253d8dca9746e67fda2228465e9360af596653c3"}
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.097693 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.099462 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.223531 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-fkf86"
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.410298 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tj6l5\" (UniqueName: \"kubernetes.io/projected/da3a231b-de19-4217-8b1b-54d40e56f0c3-kube-api-access-tj6l5\") pod \"da3a231b-de19-4217-8b1b-54d40e56f0c3\" (UID: \"da3a231b-de19-4217-8b1b-54d40e56f0c3\") "
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.410354 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da3a231b-de19-4217-8b1b-54d40e56f0c3-db-sync-config-data\") pod \"da3a231b-de19-4217-8b1b-54d40e56f0c3\" (UID: \"da3a231b-de19-4217-8b1b-54d40e56f0c3\") "
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.410547 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da3a231b-de19-4217-8b1b-54d40e56f0c3-combined-ca-bundle\") pod \"da3a231b-de19-4217-8b1b-54d40e56f0c3\" (UID: \"da3a231b-de19-4217-8b1b-54d40e56f0c3\") "
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.416961 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da3a231b-de19-4217-8b1b-54d40e56f0c3-kube-api-access-tj6l5" (OuterVolumeSpecName: "kube-api-access-tj6l5") pod "da3a231b-de19-4217-8b1b-54d40e56f0c3" (UID: "da3a231b-de19-4217-8b1b-54d40e56f0c3"). InnerVolumeSpecName "kube-api-access-tj6l5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.423653 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da3a231b-de19-4217-8b1b-54d40e56f0c3-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "da3a231b-de19-4217-8b1b-54d40e56f0c3" (UID: "da3a231b-de19-4217-8b1b-54d40e56f0c3"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.441550 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da3a231b-de19-4217-8b1b-54d40e56f0c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da3a231b-de19-4217-8b1b-54d40e56f0c3" (UID: "da3a231b-de19-4217-8b1b-54d40e56f0c3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.512523 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tj6l5\" (UniqueName: \"kubernetes.io/projected/da3a231b-de19-4217-8b1b-54d40e56f0c3-kube-api-access-tj6l5\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.512557 4884 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/da3a231b-de19-4217-8b1b-54d40e56f0c3-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.512570 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da3a231b-de19-4217-8b1b-54d40e56f0c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.929223 4884 generic.go:334] "Generic (PLEG): container finished" podID="5aeda9c4-9011-47b9-8083-f0309ed8a010" containerID="f43e9d15724a5efbdbefd705bb1c4b951ac9e1badc4cfdfb817b0021ecb2bf27" exitCode=0
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.929463 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-brxjz" event={"ID":"5aeda9c4-9011-47b9-8083-f0309ed8a010","Type":"ContainerDied","Data":"f43e9d15724a5efbdbefd705bb1c4b951ac9e1badc4cfdfb817b0021ecb2bf27"}
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.931978 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-fkf86" event={"ID":"da3a231b-de19-4217-8b1b-54d40e56f0c3","Type":"ContainerDied","Data":"e8acecb5c63745f293e125a8d27d7dca2abab88f80c664b0999d87a711aa4e99"}
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.932004 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e8acecb5c63745f293e125a8d27d7dca2abab88f80c664b0999d87a711aa4e99"
Dec 10 00:55:48 crc kubenswrapper[4884]: I1210 00:55:48.932041 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-fkf86"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.102210 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-slxfj"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.223351 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5c2v\" (UniqueName: \"kubernetes.io/projected/175b7d23-6c78-4a15-9f04-f40b83d3a932-kube-api-access-s5c2v\") pod \"175b7d23-6c78-4a15-9f04-f40b83d3a932\" (UID: \"175b7d23-6c78-4a15-9f04-f40b83d3a932\") "
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.223650 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/175b7d23-6c78-4a15-9f04-f40b83d3a932-config-data\") pod \"175b7d23-6c78-4a15-9f04-f40b83d3a932\" (UID: \"175b7d23-6c78-4a15-9f04-f40b83d3a932\") "
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.224072 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175b7d23-6c78-4a15-9f04-f40b83d3a932-combined-ca-bundle\") pod \"175b7d23-6c78-4a15-9f04-f40b83d3a932\" (UID: \"175b7d23-6c78-4a15-9f04-f40b83d3a932\") "
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.226897 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/175b7d23-6c78-4a15-9f04-f40b83d3a932-kube-api-access-s5c2v" (OuterVolumeSpecName: "kube-api-access-s5c2v") pod "175b7d23-6c78-4a15-9f04-f40b83d3a932" (UID: "175b7d23-6c78-4a15-9f04-f40b83d3a932"). InnerVolumeSpecName "kube-api-access-s5c2v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.267899 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/175b7d23-6c78-4a15-9f04-f40b83d3a932-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "175b7d23-6c78-4a15-9f04-f40b83d3a932" (UID: "175b7d23-6c78-4a15-9f04-f40b83d3a932"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.327022 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/175b7d23-6c78-4a15-9f04-f40b83d3a932-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.327243 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5c2v\" (UniqueName: \"kubernetes.io/projected/175b7d23-6c78-4a15-9f04-f40b83d3a932-kube-api-access-s5c2v\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.330761 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/175b7d23-6c78-4a15-9f04-f40b83d3a932-config-data" (OuterVolumeSpecName: "config-data") pod "175b7d23-6c78-4a15-9f04-f40b83d3a932" (UID: "175b7d23-6c78-4a15-9f04-f40b83d3a932"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.428801 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/175b7d23-6c78-4a15-9f04-f40b83d3a932-config-data\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.484417 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-67b785fd9b-fkshr"]
Dec 10 00:55:49 crc kubenswrapper[4884]: E1210 00:55:49.485032 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da3a231b-de19-4217-8b1b-54d40e56f0c3" containerName="barbican-db-sync"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.485047 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="da3a231b-de19-4217-8b1b-54d40e56f0c3" containerName="barbican-db-sync"
Dec 10 00:55:49 crc kubenswrapper[4884]: E1210 00:55:49.485067 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="175b7d23-6c78-4a15-9f04-f40b83d3a932" containerName="heat-db-sync"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.485073 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="175b7d23-6c78-4a15-9f04-f40b83d3a932" containerName="heat-db-sync"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.485259 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="175b7d23-6c78-4a15-9f04-f40b83d3a932" containerName="heat-db-sync"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.485282 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="da3a231b-de19-4217-8b1b-54d40e56f0c3" containerName="barbican-db-sync"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.486270 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.496191 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.497721 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-bm8kl"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.497938 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.517510 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7fffddc689-blxpq"]
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.519585 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7fffddc689-blxpq"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.530023 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-67b785fd9b-fkshr"]
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.532274 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.585673 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7fffddc689-blxpq"]
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.631528 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c921edd1-2c65-40c6-b946-df2ce3a6ed64-config-data\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.631576 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/339b9f5b-e372-4d2c-b939-3dabc25eef48-config-data\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.631613 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c921edd1-2c65-40c6-b946-df2ce3a6ed64-combined-ca-bundle\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.631651 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/339b9f5b-e372-4d2c-b939-3dabc25eef48-logs\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.631687 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mxq2z\" (UniqueName: \"kubernetes.io/projected/339b9f5b-e372-4d2c-b939-3dabc25eef48-kube-api-access-mxq2z\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.631710 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c921edd1-2c65-40c6-b946-df2ce3a6ed64-config-data-custom\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.631741 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/339b9f5b-e372-4d2c-b939-3dabc25eef48-config-data-custom\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.631809 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rc9q\" (UniqueName: \"kubernetes.io/projected/c921edd1-2c65-40c6-b946-df2ce3a6ed64-kube-api-access-8rc9q\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.631828 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/339b9f5b-e372-4d2c-b939-3dabc25eef48-combined-ca-bundle\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.631850 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c921edd1-2c65-40c6-b946-df2ce3a6ed64-logs\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.719864 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-46qgb"]
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.721497 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.733454 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c921edd1-2c65-40c6-b946-df2ce3a6ed64-combined-ca-bundle\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.733523 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/339b9f5b-e372-4d2c-b939-3dabc25eef48-logs\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.733564 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mxq2z\" (UniqueName: \"kubernetes.io/projected/339b9f5b-e372-4d2c-b939-3dabc25eef48-kube-api-access-mxq2z\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.733590 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c921edd1-2c65-40c6-b946-df2ce3a6ed64-config-data-custom\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq"
\"kubernetes.io/secret/339b9f5b-e372-4d2c-b939-3dabc25eef48-config-data-custom\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.734409 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rc9q\" (UniqueName: \"kubernetes.io/projected/c921edd1-2c65-40c6-b946-df2ce3a6ed64-kube-api-access-8rc9q\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.734451 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/339b9f5b-e372-4d2c-b939-3dabc25eef48-combined-ca-bundle\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.734479 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c921edd1-2c65-40c6-b946-df2ce3a6ed64-logs\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.734501 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c921edd1-2c65-40c6-b946-df2ce3a6ed64-config-data\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.734520 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/339b9f5b-e372-4d2c-b939-3dabc25eef48-config-data\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.736068 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/339b9f5b-e372-4d2c-b939-3dabc25eef48-logs\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.741795 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c921edd1-2c65-40c6-b946-df2ce3a6ed64-config-data-custom\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.742157 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c921edd1-2c65-40c6-b946-df2ce3a6ed64-combined-ca-bundle\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.742846 4884 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/339b9f5b-e372-4d2c-b939-3dabc25eef48-config-data\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.743085 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c921edd1-2c65-40c6-b946-df2ce3a6ed64-logs\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.757395 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/339b9f5b-e372-4d2c-b939-3dabc25eef48-config-data-custom\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.759131 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/339b9f5b-e372-4d2c-b939-3dabc25eef48-combined-ca-bundle\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.766869 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-46qgb"] Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.769554 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rc9q\" (UniqueName: \"kubernetes.io/projected/c921edd1-2c65-40c6-b946-df2ce3a6ed64-kube-api-access-8rc9q\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.770768 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mxq2z\" (UniqueName: \"kubernetes.io/projected/339b9f5b-e372-4d2c-b939-3dabc25eef48-kube-api-access-mxq2z\") pod \"barbican-keystone-listener-67b785fd9b-fkshr\" (UID: \"339b9f5b-e372-4d2c-b939-3dabc25eef48\") " pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.787204 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c921edd1-2c65-40c6-b946-df2ce3a6ed64-config-data\") pod \"barbican-worker-7fffddc689-blxpq\" (UID: \"c921edd1-2c65-40c6-b946-df2ce3a6ed64\") " pod="openstack/barbican-worker-7fffddc689-blxpq" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.810762 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-858b44c594-bz25h"] Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.812794 4884 util.go:30] "No sandbox for pod can be found. 
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.812794 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-858b44c594-bz25h"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.820606 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.824025 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-858b44c594-bz25h"]
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.836328 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-config\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.836375 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.836402 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pnmt\" (UniqueName: \"kubernetes.io/projected/723220f4-e60b-42b3-aa46-010d8a624675-kube-api-access-9pnmt\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.836451 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.836523 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.836584 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.855542 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.885777 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7fffddc689-blxpq"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.953134 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4tkk\" (UniqueName: \"kubernetes.io/projected/2697b83e-e631-4daa-bbcd-eba9dbb4762a-kube-api-access-n4tkk\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.953201 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.953219 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-config\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.953246 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.953266 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pnmt\" (UniqueName: \"kubernetes.io/projected/723220f4-e60b-42b3-aa46-010d8a624675-kube-api-access-9pnmt\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.953282 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-combined-ca-bundle\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.953337 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.953365 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-config-data-custom\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h"
\"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.953464 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.953483 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2697b83e-e631-4daa-bbcd-eba9dbb4762a-logs\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.954305 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.954845 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-config\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.955318 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.959550 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.959578 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.989170 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pnmt\" (UniqueName: \"kubernetes.io/projected/723220f4-e60b-42b3-aa46-010d8a624675-kube-api-access-9pnmt\") pod \"dnsmasq-dns-848cf88cfc-46qgb\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") " pod="openstack/dnsmasq-dns-848cf88cfc-46qgb" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.990994 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-slxfj" Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.991032 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-slxfj" event={"ID":"175b7d23-6c78-4a15-9f04-f40b83d3a932","Type":"ContainerDied","Data":"2176c0accb637087952791031d5af0e33458a363bce251b353d48b0b5e8483ed"} Dec 10 00:55:49 crc kubenswrapper[4884]: I1210 00:55:49.991055 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2176c0accb637087952791031d5af0e33458a363bce251b353d48b0b5e8483ed" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.060508 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-combined-ca-bundle\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.060604 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-config-data-custom\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.060667 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-config-data\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.060716 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2697b83e-e631-4daa-bbcd-eba9dbb4762a-logs\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.060774 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4tkk\" (UniqueName: \"kubernetes.io/projected/2697b83e-e631-4daa-bbcd-eba9dbb4762a-kube-api-access-n4tkk\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.061281 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2697b83e-e631-4daa-bbcd-eba9dbb4762a-logs\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.065301 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-combined-ca-bundle\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.065752 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-config-data-custom\") pod 
\"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.067611 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-config-data\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.075746 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4tkk\" (UniqueName: \"kubernetes.io/projected/2697b83e-e631-4daa-bbcd-eba9dbb4762a-kube-api-access-n4tkk\") pod \"barbican-api-858b44c594-bz25h\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") " pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.087776 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-46qgb" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.180680 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.597511 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-brxjz" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.677085 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-db-sync-config-data\") pod \"5aeda9c4-9011-47b9-8083-f0309ed8a010\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.677159 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-config-data\") pod \"5aeda9c4-9011-47b9-8083-f0309ed8a010\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.677225 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-scripts\") pod \"5aeda9c4-9011-47b9-8083-f0309ed8a010\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.677245 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5aeda9c4-9011-47b9-8083-f0309ed8a010-etc-machine-id\") pod \"5aeda9c4-9011-47b9-8083-f0309ed8a010\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.677280 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-combined-ca-bundle\") pod \"5aeda9c4-9011-47b9-8083-f0309ed8a010\" (UID: \"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.677347 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flm5p\" (UniqueName: \"kubernetes.io/projected/5aeda9c4-9011-47b9-8083-f0309ed8a010-kube-api-access-flm5p\") pod \"5aeda9c4-9011-47b9-8083-f0309ed8a010\" (UID: 
\"5aeda9c4-9011-47b9-8083-f0309ed8a010\") " Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.679015 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5aeda9c4-9011-47b9-8083-f0309ed8a010-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5aeda9c4-9011-47b9-8083-f0309ed8a010" (UID: "5aeda9c4-9011-47b9-8083-f0309ed8a010"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.689310 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5aeda9c4-9011-47b9-8083-f0309ed8a010-kube-api-access-flm5p" (OuterVolumeSpecName: "kube-api-access-flm5p") pod "5aeda9c4-9011-47b9-8083-f0309ed8a010" (UID: "5aeda9c4-9011-47b9-8083-f0309ed8a010"). InnerVolumeSpecName "kube-api-access-flm5p". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.697668 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-scripts" (OuterVolumeSpecName: "scripts") pod "5aeda9c4-9011-47b9-8083-f0309ed8a010" (UID: "5aeda9c4-9011-47b9-8083-f0309ed8a010"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.707591 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "5aeda9c4-9011-47b9-8083-f0309ed8a010" (UID: "5aeda9c4-9011-47b9-8083-f0309ed8a010"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.734860 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5aeda9c4-9011-47b9-8083-f0309ed8a010" (UID: "5aeda9c4-9011-47b9-8083-f0309ed8a010"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.782443 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-config-data" (OuterVolumeSpecName: "config-data") pod "5aeda9c4-9011-47b9-8083-f0309ed8a010" (UID: "5aeda9c4-9011-47b9-8083-f0309ed8a010"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.783843 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flm5p\" (UniqueName: \"kubernetes.io/projected/5aeda9c4-9011-47b9-8083-f0309ed8a010-kube-api-access-flm5p\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.783879 4884 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.783892 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.783904 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.783915 4884 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5aeda9c4-9011-47b9-8083-f0309ed8a010-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:50 crc kubenswrapper[4884]: I1210 00:55:50.783925 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5aeda9c4-9011-47b9-8083-f0309ed8a010-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:55:50 crc kubenswrapper[4884]: E1210 00:55:50.960357 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="341036d7-5d9f-493a-b043-11d6517c390d" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.004913 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"341036d7-5d9f-493a-b043-11d6517c390d","Type":"ContainerStarted","Data":"746f6a5a4a296ac79c01aac0deb35cb5815770a4f591e7dd9653c67b78d3ed6d"} Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.005015 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="341036d7-5d9f-493a-b043-11d6517c390d" containerName="ceilometer-notification-agent" containerID="cri-o://da117cab1679c3c345de0912eb6f653eafe1b21c86828fb9318b71dd52095381" gracePeriod=30 Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.005167 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.005258 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="341036d7-5d9f-493a-b043-11d6517c390d" containerName="proxy-httpd" containerID="cri-o://746f6a5a4a296ac79c01aac0deb35cb5815770a4f591e7dd9653c67b78d3ed6d" gracePeriod=30 Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.005307 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="341036d7-5d9f-493a-b043-11d6517c390d" containerName="sg-core" containerID="cri-o://12a903484f96f6a787ada5646f4c6141f58587a1f62c000387897987f9abaa29" gracePeriod=30 Dec 10 00:55:51 crc 
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.036600 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-brxjz" event={"ID":"5aeda9c4-9011-47b9-8083-f0309ed8a010","Type":"ContainerDied","Data":"3df0b88fa5fdff682bdb52ae73e8a3c6babca5f1256df23649f4e3fea2782660"}
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.036652 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3df0b88fa5fdff682bdb52ae73e8a3c6babca5f1256df23649f4e3fea2782660"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.036683 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-brxjz"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.125098 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-46qgb"]
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.245493 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-858b44c594-bz25h"]
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.332912 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7fffddc689-blxpq"]
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.388469 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-46qgb"]
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.423267 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 10 00:55:51 crc kubenswrapper[4884]: E1210 00:55:51.423732 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aeda9c4-9011-47b9-8083-f0309ed8a010" containerName="cinder-db-sync"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.424716 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5aeda9c4-9011-47b9-8083-f0309ed8a010" containerName="cinder-db-sync"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.424980 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5aeda9c4-9011-47b9-8083-f0309ed8a010" containerName="cinder-db-sync"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.426014 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.433034 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.433252 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.433368 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.433490 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-mdl82"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.459822 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.502561 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-67b785fd9b-fkshr"]
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.508580 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.508622 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.508645 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-scripts\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.508675 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40f84f97-adf7-479c-b0b1-671edb6e25d0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.508693 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-config-data\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.508779 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfc7x\" (UniqueName: \"kubernetes.io/projected/40f84f97-adf7-479c-b0b1-671edb6e25d0-kube-api-access-cfc7x\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.534953 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-5mctc"]
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.536614 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-5mctc"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.553036 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-5mctc"]
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.615555 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.615618 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.615641 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.615662 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-scripts\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.615693 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40f84f97-adf7-479c-b0b1-671edb6e25d0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.615709 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-config-data\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.615770 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.615792 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.615819 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-svc\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.615837 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-config\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.615873 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgpt4\" (UniqueName: \"kubernetes.io/projected/2cecdb9b-c23c-435f-9099-c8ec4118da3a-kube-api-access-bgpt4\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.615894 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfc7x\" (UniqueName: \"kubernetes.io/projected/40f84f97-adf7-479c-b0b1-671edb6e25d0-kube-api-access-cfc7x\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.619989 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40f84f97-adf7-479c-b0b1-671edb6e25d0-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.647093 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.648683 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.655962 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.662280 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfc7x\" (UniqueName: \"kubernetes.io/projected/40f84f97-adf7-479c-b0b1-671edb6e25d0-kube-api-access-cfc7x\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.693102 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719203 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-scripts\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719498 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719538 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8360c172-040b-4665-8244-dfa4dabe7a5c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719555 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719617 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8360c172-040b-4665-8244-dfa4dabe7a5c-logs\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719641 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719662 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc"
Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719686 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-svc\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc"
\"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-svc\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719702 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-config\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719731 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgpt4\" (UniqueName: \"kubernetes.io/projected/2cecdb9b-c23c-435f-9099-c8ec4118da3a-kube-api-access-bgpt4\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719767 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data-custom\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719802 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6krj5\" (UniqueName: \"kubernetes.io/projected/8360c172-040b-4665-8244-dfa4dabe7a5c-kube-api-access-6krj5\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.719821 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.720632 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.721314 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.726420 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-svc\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.727230 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-config\") pod \"dnsmasq-dns-6578955fd5-5mctc\" 
(UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.746110 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.762279 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-scripts\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.763448 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-config-data\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.764288 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.765879 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " pod="openstack/cinder-scheduler-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.770633 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.816200 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgpt4\" (UniqueName: \"kubernetes.io/projected/2cecdb9b-c23c-435f-9099-c8ec4118da3a-kube-api-access-bgpt4\") pod \"dnsmasq-dns-6578955fd5-5mctc\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.825474 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data-custom\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.825542 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6krj5\" (UniqueName: \"kubernetes.io/projected/8360c172-040b-4665-8244-dfa4dabe7a5c-kube-api-access-6krj5\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.825578 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-scripts\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.825601 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.825632 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8360c172-040b-4665-8244-dfa4dabe7a5c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.825650 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.825705 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8360c172-040b-4665-8244-dfa4dabe7a5c-logs\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.826104 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8360c172-040b-4665-8244-dfa4dabe7a5c-logs\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.828684 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8360c172-040b-4665-8244-dfa4dabe7a5c-etc-machine-id\") pod \"cinder-api-0\" (UID: 
\"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.833840 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.836709 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.837951 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data-custom\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.854543 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6krj5\" (UniqueName: \"kubernetes.io/projected/8360c172-040b-4665-8244-dfa4dabe7a5c-kube-api-access-6krj5\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.855198 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-scripts\") pod \"cinder-api-0\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") " pod="openstack/cinder-api-0" Dec 10 00:55:51 crc kubenswrapper[4884]: I1210 00:55:51.875217 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:55:52 crc kubenswrapper[4884]: I1210 00:55:52.020209 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 10 00:55:52 crc kubenswrapper[4884]: I1210 00:55:52.088080 4884 generic.go:334] "Generic (PLEG): container finished" podID="341036d7-5d9f-493a-b043-11d6517c390d" containerID="12a903484f96f6a787ada5646f4c6141f58587a1f62c000387897987f9abaa29" exitCode=2 Dec 10 00:55:52 crc kubenswrapper[4884]: I1210 00:55:52.088180 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"341036d7-5d9f-493a-b043-11d6517c390d","Type":"ContainerDied","Data":"12a903484f96f6a787ada5646f4c6141f58587a1f62c000387897987f9abaa29"} Dec 10 00:55:52 crc kubenswrapper[4884]: I1210 00:55:52.089787 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-46qgb" event={"ID":"723220f4-e60b-42b3-aa46-010d8a624675","Type":"ContainerStarted","Data":"3a16142f997b808a7ee35d70e2458860da56e97bb32b2a9c2ea3fe0b1d07d0c1"} Dec 10 00:55:52 crc kubenswrapper[4884]: I1210 00:55:52.092141 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-858b44c594-bz25h" event={"ID":"2697b83e-e631-4daa-bbcd-eba9dbb4762a","Type":"ContainerStarted","Data":"ca4dbbaaccf4feab827efd763393ff340553ad3b2bd2e9545229612c1a641126"} Dec 10 00:55:52 crc kubenswrapper[4884]: I1210 00:55:52.093182 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fffddc689-blxpq" event={"ID":"c921edd1-2c65-40c6-b946-df2ce3a6ed64","Type":"ContainerStarted","Data":"02004082b7734524b8bb09293be3bed542690cf832e3fbc7f2654addad816479"} Dec 10 00:55:52 crc kubenswrapper[4884]: I1210 00:55:52.095417 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr" event={"ID":"339b9f5b-e372-4d2c-b939-3dabc25eef48","Type":"ContainerStarted","Data":"22199363fada8f02b18ebfc2071df2dca6c7c73b3a9047f3c6a7b8b4cb192398"} Dec 10 00:55:52 crc kubenswrapper[4884]: I1210 00:55:52.363181 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 00:55:52 crc kubenswrapper[4884]: I1210 00:55:52.557328 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-5mctc"] Dec 10 00:55:52 crc kubenswrapper[4884]: I1210 00:55:52.637038 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 10 00:55:53 crc kubenswrapper[4884]: I1210 00:55:53.114934 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40f84f97-adf7-479c-b0b1-671edb6e25d0","Type":"ContainerStarted","Data":"2181224bb94838e324460f8fd158442cadbd7c4320ef434ac9595eea1abc2850"} Dec 10 00:55:53 crc kubenswrapper[4884]: I1210 00:55:53.118191 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8360c172-040b-4665-8244-dfa4dabe7a5c","Type":"ContainerStarted","Data":"16bfdba4d6b0edd160b5c901f6db10ff94d08cc1859a6550bd3c2cc24564421b"} Dec 10 00:55:53 crc kubenswrapper[4884]: I1210 00:55:53.120517 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-5mctc" event={"ID":"2cecdb9b-c23c-435f-9099-c8ec4118da3a","Type":"ContainerStarted","Data":"f913d239da205d0f2fd67d22439f8b75452802a37816972b4319d3d22bb92c94"} Dec 10 00:55:54 crc kubenswrapper[4884]: I1210 00:55:54.146382 4884 generic.go:334] "Generic (PLEG): container finished" podID="723220f4-e60b-42b3-aa46-010d8a624675" containerID="973fa533bbc50cbd5b697ffce27110e903563c4707339ad4f703a91476d4e630" exitCode=0 Dec 10 00:55:54 crc 
Dec 10 00:55:54 crc kubenswrapper[4884]: I1210 00:55:54.146880 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-46qgb" event={"ID":"723220f4-e60b-42b3-aa46-010d8a624675","Type":"ContainerDied","Data":"973fa533bbc50cbd5b697ffce27110e903563c4707339ad4f703a91476d4e630"}
Dec 10 00:55:54 crc kubenswrapper[4884]: I1210 00:55:54.157118 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-858b44c594-bz25h" event={"ID":"2697b83e-e631-4daa-bbcd-eba9dbb4762a","Type":"ContainerStarted","Data":"3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7"}
Dec 10 00:55:54 crc kubenswrapper[4884]: I1210 00:55:54.167238 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8360c172-040b-4665-8244-dfa4dabe7a5c","Type":"ContainerStarted","Data":"e50f33282528fcd40669d3807c6df98998177f2e06fe5f5b12d08b888264de0a"}
Dec 10 00:55:54 crc kubenswrapper[4884]: I1210 00:55:54.173691 4884 generic.go:334] "Generic (PLEG): container finished" podID="2cecdb9b-c23c-435f-9099-c8ec4118da3a" containerID="0acc21e6e09796daee597f3b8b9fa892b042c0ab08d6abafdf6f71b59ccb0d57" exitCode=0
Dec 10 00:55:54 crc kubenswrapper[4884]: I1210 00:55:54.173724 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-5mctc" event={"ID":"2cecdb9b-c23c-435f-9099-c8ec4118da3a","Type":"ContainerDied","Data":"0acc21e6e09796daee597f3b8b9fa892b042c0ab08d6abafdf6f71b59ccb0d57"}
Dec 10 00:55:54 crc kubenswrapper[4884]: I1210 00:55:54.359179 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.207244 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8360c172-040b-4665-8244-dfa4dabe7a5c","Type":"ContainerStarted","Data":"4f69cbcff8c617a9dffe34c719996fb8f2c8a0f3f506930b52abe5d4e907dfd9"}
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.207821 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8360c172-040b-4665-8244-dfa4dabe7a5c" containerName="cinder-api-log" containerID="cri-o://e50f33282528fcd40669d3807c6df98998177f2e06fe5f5b12d08b888264de0a" gracePeriod=30
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.207904 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.208221 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="8360c172-040b-4665-8244-dfa4dabe7a5c" containerName="cinder-api" containerID="cri-o://4f69cbcff8c617a9dffe34c719996fb8f2c8a0f3f506930b52abe5d4e907dfd9" gracePeriod=30
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.212090 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-5mctc" event={"ID":"2cecdb9b-c23c-435f-9099-c8ec4118da3a","Type":"ContainerStarted","Data":"6f80a30d0ec7d5b050ed42f272509638d01297da42326f6319663958d7235830"}
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.213187 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-5mctc"
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.233417 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-858b44c594-bz25h" event={"ID":"2697b83e-e631-4daa-bbcd-eba9dbb4762a","Type":"ContainerStarted","Data":"12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39"}
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.233642 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-858b44c594-bz25h"
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.233726 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-858b44c594-bz25h"
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.244298 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.244281995 podStartE2EDuration="4.244281995s" podCreationTimestamp="2025-12-10 00:55:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:55.231702924 +0000 UTC m=+1528.309660061" watchObservedRunningTime="2025-12-10 00:55:55.244281995 +0000 UTC m=+1528.322239112"
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.256153 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-5mctc" podStartSLOduration=4.256135176 podStartE2EDuration="4.256135176s" podCreationTimestamp="2025-12-10 00:55:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:55.248836368 +0000 UTC m=+1528.326793485" watchObservedRunningTime="2025-12-10 00:55:55.256135176 +0000 UTC m=+1528.334092293"
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.280542 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-858b44c594-bz25h" podStartSLOduration=6.280522837 podStartE2EDuration="6.280522837s" podCreationTimestamp="2025-12-10 00:55:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:55.265967963 +0000 UTC m=+1528.343925080" watchObservedRunningTime="2025-12-10 00:55:55.280522837 +0000 UTC m=+1528.358479954"
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.498679 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.642241 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-dns-swift-storage-0\") pod \"723220f4-e60b-42b3-aa46-010d8a624675\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") "
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.642356 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-config\") pod \"723220f4-e60b-42b3-aa46-010d8a624675\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") "
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.642480 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-ovsdbserver-sb\") pod \"723220f4-e60b-42b3-aa46-010d8a624675\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") "
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.642513 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-ovsdbserver-nb\") pod \"723220f4-e60b-42b3-aa46-010d8a624675\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") "
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.642541 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9pnmt\" (UniqueName: \"kubernetes.io/projected/723220f4-e60b-42b3-aa46-010d8a624675-kube-api-access-9pnmt\") pod \"723220f4-e60b-42b3-aa46-010d8a624675\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") "
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.642566 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-dns-svc\") pod \"723220f4-e60b-42b3-aa46-010d8a624675\" (UID: \"723220f4-e60b-42b3-aa46-010d8a624675\") "
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.652663 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/723220f4-e60b-42b3-aa46-010d8a624675-kube-api-access-9pnmt" (OuterVolumeSpecName: "kube-api-access-9pnmt") pod "723220f4-e60b-42b3-aa46-010d8a624675" (UID: "723220f4-e60b-42b3-aa46-010d8a624675"). InnerVolumeSpecName "kube-api-access-9pnmt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.670379 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-config" (OuterVolumeSpecName: "config") pod "723220f4-e60b-42b3-aa46-010d8a624675" (UID: "723220f4-e60b-42b3-aa46-010d8a624675"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.686923 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "723220f4-e60b-42b3-aa46-010d8a624675" (UID: "723220f4-e60b-42b3-aa46-010d8a624675"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.686937 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "723220f4-e60b-42b3-aa46-010d8a624675" (UID: "723220f4-e60b-42b3-aa46-010d8a624675"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.691451 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "723220f4-e60b-42b3-aa46-010d8a624675" (UID: "723220f4-e60b-42b3-aa46-010d8a624675"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.709080 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "723220f4-e60b-42b3-aa46-010d8a624675" (UID: "723220f4-e60b-42b3-aa46-010d8a624675"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.746528 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.746561 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.746571 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.746581 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9pnmt\" (UniqueName: \"kubernetes.io/projected/723220f4-e60b-42b3-aa46-010d8a624675-kube-api-access-9pnmt\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.746590 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:55 crc kubenswrapper[4884]: I1210 00:55:55.746598 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/723220f4-e60b-42b3-aa46-010d8a624675-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.272487 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-46qgb" event={"ID":"723220f4-e60b-42b3-aa46-010d8a624675","Type":"ContainerDied","Data":"3a16142f997b808a7ee35d70e2458860da56e97bb32b2a9c2ea3fe0b1d07d0c1"}
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.272744 4884 scope.go:117] "RemoveContainer" containerID="973fa533bbc50cbd5b697ffce27110e903563c4707339ad4f703a91476d4e630"
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.272907 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-46qgb"
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.298700 4884 generic.go:334] "Generic (PLEG): container finished" podID="8360c172-040b-4665-8244-dfa4dabe7a5c" containerID="4f69cbcff8c617a9dffe34c719996fb8f2c8a0f3f506930b52abe5d4e907dfd9" exitCode=0
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.299024 4884 generic.go:334] "Generic (PLEG): container finished" podID="8360c172-040b-4665-8244-dfa4dabe7a5c" containerID="e50f33282528fcd40669d3807c6df98998177f2e06fe5f5b12d08b888264de0a" exitCode=143
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.298850 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8360c172-040b-4665-8244-dfa4dabe7a5c","Type":"ContainerDied","Data":"4f69cbcff8c617a9dffe34c719996fb8f2c8a0f3f506930b52abe5d4e907dfd9"}
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.299126 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8360c172-040b-4665-8244-dfa4dabe7a5c","Type":"ContainerDied","Data":"e50f33282528fcd40669d3807c6df98998177f2e06fe5f5b12d08b888264de0a"}
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.302070 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fffddc689-blxpq" event={"ID":"c921edd1-2c65-40c6-b946-df2ce3a6ed64","Type":"ContainerStarted","Data":"a12126d68f1b1b6a0580735df9e54a328c380437e0c427a2e0936ec8223378ec"}
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.304865 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr" event={"ID":"339b9f5b-e372-4d2c-b939-3dabc25eef48","Type":"ContainerStarted","Data":"6cc8dcf694af2ec917f1ecd84319ba7a7e8eefe6bf42ae81aec3cf1c9b481db6"}
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.309769 4884 generic.go:334] "Generic (PLEG): container finished" podID="341036d7-5d9f-493a-b043-11d6517c390d" containerID="da117cab1679c3c345de0912eb6f653eafe1b21c86828fb9318b71dd52095381" exitCode=0
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.310006 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"341036d7-5d9f-493a-b043-11d6517c390d","Type":"ContainerDied","Data":"da117cab1679c3c345de0912eb6f653eafe1b21c86828fb9318b71dd52095381"}
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.374673 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.451581 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-46qgb"]
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.468170 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-combined-ca-bundle\") pod \"8360c172-040b-4665-8244-dfa4dabe7a5c\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") "
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.468271 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8360c172-040b-4665-8244-dfa4dabe7a5c-etc-machine-id\") pod \"8360c172-040b-4665-8244-dfa4dabe7a5c\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") "
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.468321 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6krj5\" (UniqueName: \"kubernetes.io/projected/8360c172-040b-4665-8244-dfa4dabe7a5c-kube-api-access-6krj5\") pod \"8360c172-040b-4665-8244-dfa4dabe7a5c\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") "
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.468336 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8360c172-040b-4665-8244-dfa4dabe7a5c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "8360c172-040b-4665-8244-dfa4dabe7a5c" (UID: "8360c172-040b-4665-8244-dfa4dabe7a5c"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.468345 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-scripts\") pod \"8360c172-040b-4665-8244-dfa4dabe7a5c\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") "
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.468520 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data-custom\") pod \"8360c172-040b-4665-8244-dfa4dabe7a5c\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") "
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.468585 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data\") pod \"8360c172-040b-4665-8244-dfa4dabe7a5c\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") "
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.468699 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8360c172-040b-4665-8244-dfa4dabe7a5c-logs\") pod \"8360c172-040b-4665-8244-dfa4dabe7a5c\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") "
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.469460 4884 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8360c172-040b-4665-8244-dfa4dabe7a5c-etc-machine-id\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.470260 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8360c172-040b-4665-8244-dfa4dabe7a5c-logs" (OuterVolumeSpecName: "logs") pod "8360c172-040b-4665-8244-dfa4dabe7a5c" (UID: "8360c172-040b-4665-8244-dfa4dabe7a5c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.470583 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-46qgb"]
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.476574 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8360c172-040b-4665-8244-dfa4dabe7a5c-kube-api-access-6krj5" (OuterVolumeSpecName: "kube-api-access-6krj5") pod "8360c172-040b-4665-8244-dfa4dabe7a5c" (UID: "8360c172-040b-4665-8244-dfa4dabe7a5c"). InnerVolumeSpecName "kube-api-access-6krj5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.477885 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8360c172-040b-4665-8244-dfa4dabe7a5c" (UID: "8360c172-040b-4665-8244-dfa4dabe7a5c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.486657 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-scripts" (OuterVolumeSpecName: "scripts") pod "8360c172-040b-4665-8244-dfa4dabe7a5c" (UID: "8360c172-040b-4665-8244-dfa4dabe7a5c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.542063 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8360c172-040b-4665-8244-dfa4dabe7a5c" (UID: "8360c172-040b-4665-8244-dfa4dabe7a5c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.570447 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data" (OuterVolumeSpecName: "config-data") pod "8360c172-040b-4665-8244-dfa4dabe7a5c" (UID: "8360c172-040b-4665-8244-dfa4dabe7a5c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.570736 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data\") pod \"8360c172-040b-4665-8244-dfa4dabe7a5c\" (UID: \"8360c172-040b-4665-8244-dfa4dabe7a5c\") "
Dec 10 00:55:56 crc kubenswrapper[4884]: W1210 00:55:56.570841 4884 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/8360c172-040b-4665-8244-dfa4dabe7a5c/volumes/kubernetes.io~secret/config-data
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.570863 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data" (OuterVolumeSpecName: "config-data") pod "8360c172-040b-4665-8244-dfa4dabe7a5c" (UID: "8360c172-040b-4665-8244-dfa4dabe7a5c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.571293 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6krj5\" (UniqueName: \"kubernetes.io/projected/8360c172-040b-4665-8244-dfa4dabe7a5c-kube-api-access-6krj5\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.571312 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-scripts\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.571321 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data-custom\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.571330 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-config-data\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.571341 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8360c172-040b-4665-8244-dfa4dabe7a5c-logs\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:56 crc kubenswrapper[4884]: I1210 00:55:56.571349 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8360c172-040b-4665-8244-dfa4dabe7a5c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.284881 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7844c4d96d-4v75g"]
Dec 10 00:55:57 crc kubenswrapper[4884]: E1210 00:55:57.285535 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8360c172-040b-4665-8244-dfa4dabe7a5c" containerName="cinder-api-log"
Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.285548 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8360c172-040b-4665-8244-dfa4dabe7a5c" containerName="cinder-api-log"
Dec 10 00:55:57 crc kubenswrapper[4884]: E1210 00:55:57.285564 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8360c172-040b-4665-8244-dfa4dabe7a5c" containerName="cinder-api"
Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.285570 4884 state_mem.go:107] "Deleted CPUSet assignment"
podUID="8360c172-040b-4665-8244-dfa4dabe7a5c" containerName="cinder-api" Dec 10 00:55:57 crc kubenswrapper[4884]: E1210 00:55:57.285595 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="723220f4-e60b-42b3-aa46-010d8a624675" containerName="init" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.285601 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="723220f4-e60b-42b3-aa46-010d8a624675" containerName="init" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.285781 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8360c172-040b-4665-8244-dfa4dabe7a5c" containerName="cinder-api" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.285794 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="723220f4-e60b-42b3-aa46-010d8a624675" containerName="init" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.285806 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8360c172-040b-4665-8244-dfa4dabe7a5c" containerName="cinder-api-log" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.287422 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.305823 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.306282 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.321608 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="723220f4-e60b-42b3-aa46-010d8a624675" path="/var/lib/kubelet/pods/723220f4-e60b-42b3-aa46-010d8a624675/volumes" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.330732 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7844c4d96d-4v75g"] Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.333411 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40f84f97-adf7-479c-b0b1-671edb6e25d0","Type":"ContainerStarted","Data":"dc7c7938947157b7bf74dde3021291f0f04b9d4224e4db2e8d90917edffeb6ea"} Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.340642 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8360c172-040b-4665-8244-dfa4dabe7a5c","Type":"ContainerDied","Data":"16bfdba4d6b0edd160b5c901f6db10ff94d08cc1859a6550bd3c2cc24564421b"} Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.340692 4884 scope.go:117] "RemoveContainer" containerID="4f69cbcff8c617a9dffe34c719996fb8f2c8a0f3f506930b52abe5d4e907dfd9" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.340810 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.354639 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fffddc689-blxpq" event={"ID":"c921edd1-2c65-40c6-b946-df2ce3a6ed64","Type":"ContainerStarted","Data":"b0e646531a162506fca285b67a8a14f3b3a572aaf276d22ec423b5cf2b29329d"} Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.374588 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr" event={"ID":"339b9f5b-e372-4d2c-b939-3dabc25eef48","Type":"ContainerStarted","Data":"f85f3bcac1d01cfb3dfba362b2256f9fe39ca4c54c0aec85961fb59005f0b9b5"} Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.385317 4884 scope.go:117] "RemoveContainer" containerID="e50f33282528fcd40669d3807c6df98998177f2e06fe5f5b12d08b888264de0a" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.399593 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqzth\" (UniqueName: \"kubernetes.io/projected/fd39a161-40d7-4f62-825a-9f022cd6d32e-kube-api-access-mqzth\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.399641 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd39a161-40d7-4f62-825a-9f022cd6d32e-logs\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.399670 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-combined-ca-bundle\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.399692 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-config-data\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.399728 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-public-tls-certs\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.399761 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-internal-tls-certs\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.399803 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-config-data-custom\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.405778 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.411073 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.425600 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-67b785fd9b-fkshr" podStartSLOduration=4.022988821 podStartE2EDuration="8.425582166s" podCreationTimestamp="2025-12-10 00:55:49 +0000 UTC" firstStartedPulling="2025-12-10 00:55:51.473718035 +0000 UTC m=+1524.551675152" lastFinishedPulling="2025-12-10 00:55:55.87631139 +0000 UTC m=+1528.954268497" observedRunningTime="2025-12-10 00:55:57.416148631 +0000 UTC m=+1530.494105768" watchObservedRunningTime="2025-12-10 00:55:57.425582166 +0000 UTC m=+1530.503539273" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.437042 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.438729 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.447244 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.447398 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.447470 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.469707 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.489133 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7fffddc689-blxpq" podStartSLOduration=3.916939649 podStartE2EDuration="8.489116917s" podCreationTimestamp="2025-12-10 00:55:49 +0000 UTC" firstStartedPulling="2025-12-10 00:55:51.302887469 +0000 UTC m=+1524.380844586" lastFinishedPulling="2025-12-10 00:55:55.875064737 +0000 UTC m=+1528.953021854" observedRunningTime="2025-12-10 00:55:57.467391388 +0000 UTC m=+1530.545348505" watchObservedRunningTime="2025-12-10 00:55:57.489116917 +0000 UTC m=+1530.567074034" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.501638 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqzth\" (UniqueName: \"kubernetes.io/projected/fd39a161-40d7-4f62-825a-9f022cd6d32e-kube-api-access-mqzth\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.501710 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd39a161-40d7-4f62-825a-9f022cd6d32e-logs\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc 
kubenswrapper[4884]: I1210 00:55:57.501740 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-combined-ca-bundle\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.501768 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-config-data\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.501846 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-public-tls-certs\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.501926 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-internal-tls-certs\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.502015 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-config-data-custom\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.506977 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd39a161-40d7-4f62-825a-9f022cd6d32e-logs\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.517126 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-internal-tls-certs\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.517198 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-public-tls-certs\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.524104 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-config-data-custom\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.528092 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-config-data\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.528484 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqzth\" (UniqueName: \"kubernetes.io/projected/fd39a161-40d7-4f62-825a-9f022cd6d32e-kube-api-access-mqzth\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.529285 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd39a161-40d7-4f62-825a-9f022cd6d32e-combined-ca-bundle\") pod \"barbican-api-7844c4d96d-4v75g\" (UID: \"fd39a161-40d7-4f62-825a-9f022cd6d32e\") " pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.603475 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-scripts\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.603809 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad703bf4-2568-4b7d-939d-cacee6ded9b3-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.603925 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k86h4\" (UniqueName: \"kubernetes.io/projected/ad703bf4-2568-4b7d-939d-cacee6ded9b3-kube-api-access-k86h4\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.604027 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-config-data-custom\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.604103 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-config-data\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.604219 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.604323 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.604446 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad703bf4-2568-4b7d-939d-cacee6ded9b3-logs\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.604537 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.633909 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.706460 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k86h4\" (UniqueName: \"kubernetes.io/projected/ad703bf4-2568-4b7d-939d-cacee6ded9b3-kube-api-access-k86h4\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.706761 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-config-data-custom\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.706920 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-config-data\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.707064 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.707204 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.707316 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad703bf4-2568-4b7d-939d-cacee6ded9b3-logs\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.707415 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-combined-ca-bundle\") pod \"cinder-api-0\" (UID: 
\"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.707608 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-scripts\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.707762 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad703bf4-2568-4b7d-939d-cacee6ded9b3-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.707919 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad703bf4-2568-4b7d-939d-cacee6ded9b3-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.711455 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad703bf4-2568-4b7d-939d-cacee6ded9b3-logs\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.711953 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.714606 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-config-data\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.717337 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.725462 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-scripts\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.727478 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.728400 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad703bf4-2568-4b7d-939d-cacee6ded9b3-config-data-custom\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc 
kubenswrapper[4884]: I1210 00:55:57.734780 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k86h4\" (UniqueName: \"kubernetes.io/projected/ad703bf4-2568-4b7d-939d-cacee6ded9b3-kube-api-access-k86h4\") pod \"cinder-api-0\" (UID: \"ad703bf4-2568-4b7d-939d-cacee6ded9b3\") " pod="openstack/cinder-api-0" Dec 10 00:55:57 crc kubenswrapper[4884]: I1210 00:55:57.770884 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 10 00:55:58 crc kubenswrapper[4884]: I1210 00:55:58.150956 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7c6d9d796d-n2w9w" Dec 10 00:55:58 crc kubenswrapper[4884]: W1210 00:55:58.247307 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd39a161_40d7_4f62_825a_9f022cd6d32e.slice/crio-d47c674f5afafaaa0d6cc0ca6aca46b6fd004dd90f16ca7597dcebb824993dc9 WatchSource:0}: Error finding container d47c674f5afafaaa0d6cc0ca6aca46b6fd004dd90f16ca7597dcebb824993dc9: Status 404 returned error can't find the container with id d47c674f5afafaaa0d6cc0ca6aca46b6fd004dd90f16ca7597dcebb824993dc9 Dec 10 00:55:58 crc kubenswrapper[4884]: I1210 00:55:58.275778 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7844c4d96d-4v75g"] Dec 10 00:55:58 crc kubenswrapper[4884]: I1210 00:55:58.399814 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7844c4d96d-4v75g" event={"ID":"fd39a161-40d7-4f62-825a-9f022cd6d32e","Type":"ContainerStarted","Data":"d47c674f5afafaaa0d6cc0ca6aca46b6fd004dd90f16ca7597dcebb824993dc9"} Dec 10 00:55:58 crc kubenswrapper[4884]: I1210 00:55:58.408601 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40f84f97-adf7-479c-b0b1-671edb6e25d0","Type":"ContainerStarted","Data":"a63f409c76303d58f9645fe94421599a6af57a8249bc1ea0c5cee516e2f869ee"} Dec 10 00:55:58 crc kubenswrapper[4884]: I1210 00:55:58.516147 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.990629555 podStartE2EDuration="7.516115549s" podCreationTimestamp="2025-12-10 00:55:51 +0000 UTC" firstStartedPulling="2025-12-10 00:55:52.348558846 +0000 UTC m=+1525.426515963" lastFinishedPulling="2025-12-10 00:55:55.87404484 +0000 UTC m=+1528.952001957" observedRunningTime="2025-12-10 00:55:58.510323872 +0000 UTC m=+1531.588280999" watchObservedRunningTime="2025-12-10 00:55:58.516115549 +0000 UTC m=+1531.594072666" Dec 10 00:55:58 crc kubenswrapper[4884]: I1210 00:55:58.554738 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 10 00:55:59 crc kubenswrapper[4884]: I1210 00:55:59.297978 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8360c172-040b-4665-8244-dfa4dabe7a5c" path="/var/lib/kubelet/pods/8360c172-040b-4665-8244-dfa4dabe7a5c/volumes" Dec 10 00:55:59 crc kubenswrapper[4884]: I1210 00:55:59.454749 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7844c4d96d-4v75g" event={"ID":"fd39a161-40d7-4f62-825a-9f022cd6d32e","Type":"ContainerStarted","Data":"abb4bebcc25ce28e18f155b05ad02b79a92cb6630ca35a623a4ec159da601c75"} Dec 10 00:55:59 crc kubenswrapper[4884]: I1210 00:55:59.454795 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7844c4d96d-4v75g" 
event={"ID":"fd39a161-40d7-4f62-825a-9f022cd6d32e","Type":"ContainerStarted","Data":"46e15b5947979e4316187bfe8a8ba6be29d6696741ca929fa921d9fe2df57421"} Dec 10 00:55:59 crc kubenswrapper[4884]: I1210 00:55:59.456553 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:59 crc kubenswrapper[4884]: I1210 00:55:59.456785 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7844c4d96d-4v75g" Dec 10 00:55:59 crc kubenswrapper[4884]: I1210 00:55:59.474548 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ad703bf4-2568-4b7d-939d-cacee6ded9b3","Type":"ContainerStarted","Data":"11e29c0d5c13f8ba4a9cbe33decc3aacc432c505d8facb232d65727a0c6a4191"} Dec 10 00:55:59 crc kubenswrapper[4884]: I1210 00:55:59.474597 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ad703bf4-2568-4b7d-939d-cacee6ded9b3","Type":"ContainerStarted","Data":"3f708f6186057f0ab416ea412c3281d9397bb7d1edb9f70ec337663dd632c8f4"} Dec 10 00:55:59 crc kubenswrapper[4884]: I1210 00:55:59.497012 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7844c4d96d-4v75g" podStartSLOduration=2.496991312 podStartE2EDuration="2.496991312s" podCreationTimestamp="2025-12-10 00:55:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:55:59.489350995 +0000 UTC m=+1532.567308112" watchObservedRunningTime="2025-12-10 00:55:59.496991312 +0000 UTC m=+1532.574948429" Dec 10 00:56:00 crc kubenswrapper[4884]: I1210 00:56:00.487144 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ad703bf4-2568-4b7d-939d-cacee6ded9b3","Type":"ContainerStarted","Data":"9d7ed45a1e8f52f16582c1bd72c655b644ee8320901b873632cb45f546ff923b"} Dec 10 00:56:00 crc kubenswrapper[4884]: I1210 00:56:00.487421 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 10 00:56:01 crc kubenswrapper[4884]: I1210 00:56:01.603149 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:56:01 crc kubenswrapper[4884]: I1210 00:56:01.632420 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.63239352 podStartE2EDuration="4.63239352s" podCreationTimestamp="2025-12-10 00:55:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:00.520802507 +0000 UTC m=+1533.598759624" watchObservedRunningTime="2025-12-10 00:56:01.63239352 +0000 UTC m=+1534.710350657" Dec 10 00:56:01 crc kubenswrapper[4884]: I1210 00:56:01.655986 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-858b44c594-bz25h" Dec 10 00:56:01 crc kubenswrapper[4884]: I1210 00:56:01.771930 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 10 00:56:01 crc kubenswrapper[4884]: I1210 00:56:01.894832 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:56:01 crc kubenswrapper[4884]: I1210 00:56:01.971634 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-6b7b667979-gw7fg"] Dec 10 00:56:01 crc kubenswrapper[4884]: I1210 00:56:01.971905 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" podUID="3d53bd28-0069-4228-a8dd-e0f065a80bb6" containerName="dnsmasq-dns" containerID="cri-o://662cdfc7178497f3ca9035f4105b066b98bed00449bc4aaed874f1bd12f203df" gracePeriod=10 Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.111798 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.510644 4884 generic.go:334] "Generic (PLEG): container finished" podID="3d53bd28-0069-4228-a8dd-e0f065a80bb6" containerID="662cdfc7178497f3ca9035f4105b066b98bed00449bc4aaed874f1bd12f203df" exitCode=0 Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.510870 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" event={"ID":"3d53bd28-0069-4228-a8dd-e0f065a80bb6","Type":"ContainerDied","Data":"662cdfc7178497f3ca9035f4105b066b98bed00449bc4aaed874f1bd12f203df"} Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.512524 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" event={"ID":"3d53bd28-0069-4228-a8dd-e0f065a80bb6","Type":"ContainerDied","Data":"ddfb7d392e4fec47a04ab6efa1e3356468fd5b221ff99f344a672ce107ecf095"} Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.512552 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ddfb7d392e4fec47a04ab6efa1e3356468fd5b221ff99f344a672ce107ecf095" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.548132 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.564269 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.695197 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-ovsdbserver-sb\") pod \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.695276 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-config\") pod \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.695310 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-dns-svc\") pod \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.695358 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-ovsdbserver-nb\") pod \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.695403 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhhv7\" (UniqueName: \"kubernetes.io/projected/3d53bd28-0069-4228-a8dd-e0f065a80bb6-kube-api-access-hhhv7\") pod \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.695419 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-dns-swift-storage-0\") pod \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\" (UID: \"3d53bd28-0069-4228-a8dd-e0f065a80bb6\") " Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.717602 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d53bd28-0069-4228-a8dd-e0f065a80bb6-kube-api-access-hhhv7" (OuterVolumeSpecName: "kube-api-access-hhhv7") pod "3d53bd28-0069-4228-a8dd-e0f065a80bb6" (UID: "3d53bd28-0069-4228-a8dd-e0f065a80bb6"). InnerVolumeSpecName "kube-api-access-hhhv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.799795 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhhv7\" (UniqueName: \"kubernetes.io/projected/3d53bd28-0069-4228-a8dd-e0f065a80bb6-kube-api-access-hhhv7\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.804058 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-config" (OuterVolumeSpecName: "config") pod "3d53bd28-0069-4228-a8dd-e0f065a80bb6" (UID: "3d53bd28-0069-4228-a8dd-e0f065a80bb6"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.832496 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3d53bd28-0069-4228-a8dd-e0f065a80bb6" (UID: "3d53bd28-0069-4228-a8dd-e0f065a80bb6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.832784 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3d53bd28-0069-4228-a8dd-e0f065a80bb6" (UID: "3d53bd28-0069-4228-a8dd-e0f065a80bb6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.846973 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3d53bd28-0069-4228-a8dd-e0f065a80bb6" (UID: "3d53bd28-0069-4228-a8dd-e0f065a80bb6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.871871 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3d53bd28-0069-4228-a8dd-e0f065a80bb6" (UID: "3d53bd28-0069-4228-a8dd-e0f065a80bb6"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.901715 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.901753 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.901762 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.901774 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:02 crc kubenswrapper[4884]: I1210 00:56:02.901783 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d53bd28-0069-4228-a8dd-e0f065a80bb6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:03 crc kubenswrapper[4884]: I1210 00:56:03.525696 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="40f84f97-adf7-479c-b0b1-671edb6e25d0" containerName="cinder-scheduler" containerID="cri-o://dc7c7938947157b7bf74dde3021291f0f04b9d4224e4db2e8d90917edffeb6ea" gracePeriod=30 Dec 10 00:56:03 crc kubenswrapper[4884]: I1210 00:56:03.525760 
4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-gw7fg" Dec 10 00:56:03 crc kubenswrapper[4884]: I1210 00:56:03.525818 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="40f84f97-adf7-479c-b0b1-671edb6e25d0" containerName="probe" containerID="cri-o://a63f409c76303d58f9645fe94421599a6af57a8249bc1ea0c5cee516e2f869ee" gracePeriod=30 Dec 10 00:56:03 crc kubenswrapper[4884]: I1210 00:56:03.560690 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-gw7fg"] Dec 10 00:56:03 crc kubenswrapper[4884]: I1210 00:56:03.571740 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-gw7fg"] Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.143342 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7656cd6689-46nqx" Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.215522 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7c6d9d796d-n2w9w"] Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.215892 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7c6d9d796d-n2w9w" podUID="1604e8e1-2382-4aa4-b645-332d4c1c00b1" containerName="neutron-api" containerID="cri-o://58071ff80b4d2d77aeda25b3bc4d5e8a137b5952afc30e3397a96bfec551af6e" gracePeriod=30 Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.216555 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7c6d9d796d-n2w9w" podUID="1604e8e1-2382-4aa4-b645-332d4c1c00b1" containerName="neutron-httpd" containerID="cri-o://99e32a4abaf2fd173eee66ec1de614db4a9ee006b9e7358e15f01402dca40202" gracePeriod=30 Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.535871 4884 generic.go:334] "Generic (PLEG): container finished" podID="40f84f97-adf7-479c-b0b1-671edb6e25d0" containerID="a63f409c76303d58f9645fe94421599a6af57a8249bc1ea0c5cee516e2f869ee" exitCode=0 Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.535905 4884 generic.go:334] "Generic (PLEG): container finished" podID="40f84f97-adf7-479c-b0b1-671edb6e25d0" containerID="dc7c7938947157b7bf74dde3021291f0f04b9d4224e4db2e8d90917edffeb6ea" exitCode=0 Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.535961 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40f84f97-adf7-479c-b0b1-671edb6e25d0","Type":"ContainerDied","Data":"a63f409c76303d58f9645fe94421599a6af57a8249bc1ea0c5cee516e2f869ee"} Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.536009 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40f84f97-adf7-479c-b0b1-671edb6e25d0","Type":"ContainerDied","Data":"dc7c7938947157b7bf74dde3021291f0f04b9d4224e4db2e8d90917edffeb6ea"} Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.537790 4884 generic.go:334] "Generic (PLEG): container finished" podID="1604e8e1-2382-4aa4-b645-332d4c1c00b1" containerID="99e32a4abaf2fd173eee66ec1de614db4a9ee006b9e7358e15f01402dca40202" exitCode=0 Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.537830 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7c6d9d796d-n2w9w" event={"ID":"1604e8e1-2382-4aa4-b645-332d4c1c00b1","Type":"ContainerDied","Data":"99e32a4abaf2fd173eee66ec1de614db4a9ee006b9e7358e15f01402dca40202"} Dec 10 00:56:04 crc 
kubenswrapper[4884]: I1210 00:56:04.791758 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.845448 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfc7x\" (UniqueName: \"kubernetes.io/projected/40f84f97-adf7-479c-b0b1-671edb6e25d0-kube-api-access-cfc7x\") pod \"40f84f97-adf7-479c-b0b1-671edb6e25d0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.845593 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40f84f97-adf7-479c-b0b1-671edb6e25d0-etc-machine-id\") pod \"40f84f97-adf7-479c-b0b1-671edb6e25d0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.845663 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-scripts\") pod \"40f84f97-adf7-479c-b0b1-671edb6e25d0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.845697 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-config-data-custom\") pod \"40f84f97-adf7-479c-b0b1-671edb6e25d0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.845730 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-config-data\") pod \"40f84f97-adf7-479c-b0b1-671edb6e25d0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.845903 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-combined-ca-bundle\") pod \"40f84f97-adf7-479c-b0b1-671edb6e25d0\" (UID: \"40f84f97-adf7-479c-b0b1-671edb6e25d0\") " Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.846105 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/40f84f97-adf7-479c-b0b1-671edb6e25d0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "40f84f97-adf7-479c-b0b1-671edb6e25d0" (UID: "40f84f97-adf7-479c-b0b1-671edb6e25d0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.846491 4884 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40f84f97-adf7-479c-b0b1-671edb6e25d0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.851679 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-scripts" (OuterVolumeSpecName: "scripts") pod "40f84f97-adf7-479c-b0b1-671edb6e25d0" (UID: "40f84f97-adf7-479c-b0b1-671edb6e25d0"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.854345 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "40f84f97-adf7-479c-b0b1-671edb6e25d0" (UID: "40f84f97-adf7-479c-b0b1-671edb6e25d0"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.856125 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40f84f97-adf7-479c-b0b1-671edb6e25d0-kube-api-access-cfc7x" (OuterVolumeSpecName: "kube-api-access-cfc7x") pod "40f84f97-adf7-479c-b0b1-671edb6e25d0" (UID: "40f84f97-adf7-479c-b0b1-671edb6e25d0"). InnerVolumeSpecName "kube-api-access-cfc7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.926012 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "40f84f97-adf7-479c-b0b1-671edb6e25d0" (UID: "40f84f97-adf7-479c-b0b1-671edb6e25d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.948549 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.948587 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfc7x\" (UniqueName: \"kubernetes.io/projected/40f84f97-adf7-479c-b0b1-671edb6e25d0-kube-api-access-cfc7x\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.948603 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.948614 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:04 crc kubenswrapper[4884]: I1210 00:56:04.976788 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-config-data" (OuterVolumeSpecName: "config-data") pod "40f84f97-adf7-479c-b0b1-671edb6e25d0" (UID: "40f84f97-adf7-479c-b0b1-671edb6e25d0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.050399 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40f84f97-adf7-479c-b0b1-671edb6e25d0-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.297815 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d53bd28-0069-4228-a8dd-e0f065a80bb6" path="/var/lib/kubelet/pods/3d53bd28-0069-4228-a8dd-e0f065a80bb6/volumes" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.546810 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40f84f97-adf7-479c-b0b1-671edb6e25d0","Type":"ContainerDied","Data":"2181224bb94838e324460f8fd158442cadbd7c4320ef434ac9595eea1abc2850"} Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.546859 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.546874 4884 scope.go:117] "RemoveContainer" containerID="a63f409c76303d58f9645fe94421599a6af57a8249bc1ea0c5cee516e2f869ee" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.571343 4884 scope.go:117] "RemoveContainer" containerID="dc7c7938947157b7bf74dde3021291f0f04b9d4224e4db2e8d90917edffeb6ea" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.581668 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.597290 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.637949 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 00:56:05 crc kubenswrapper[4884]: E1210 00:56:05.638338 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d53bd28-0069-4228-a8dd-e0f065a80bb6" containerName="dnsmasq-dns" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.638355 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d53bd28-0069-4228-a8dd-e0f065a80bb6" containerName="dnsmasq-dns" Dec 10 00:56:05 crc kubenswrapper[4884]: E1210 00:56:05.638374 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40f84f97-adf7-479c-b0b1-671edb6e25d0" containerName="probe" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.638380 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="40f84f97-adf7-479c-b0b1-671edb6e25d0" containerName="probe" Dec 10 00:56:05 crc kubenswrapper[4884]: E1210 00:56:05.638395 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40f84f97-adf7-479c-b0b1-671edb6e25d0" containerName="cinder-scheduler" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.638401 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="40f84f97-adf7-479c-b0b1-671edb6e25d0" containerName="cinder-scheduler" Dec 10 00:56:05 crc kubenswrapper[4884]: E1210 00:56:05.638416 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d53bd28-0069-4228-a8dd-e0f065a80bb6" containerName="init" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.638422 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d53bd28-0069-4228-a8dd-e0f065a80bb6" containerName="init" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.638618 4884 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="40f84f97-adf7-479c-b0b1-671edb6e25d0" containerName="cinder-scheduler" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.638640 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="40f84f97-adf7-479c-b0b1-671edb6e25d0" containerName="probe" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.638661 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d53bd28-0069-4228-a8dd-e0f065a80bb6" containerName="dnsmasq-dns" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.639673 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.642029 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.660321 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-config-data\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.660387 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-scripts\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.660472 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.660566 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fd7rv\" (UniqueName: \"kubernetes.io/projected/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-kube-api-access-fd7rv\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.660606 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.660719 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.662285 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.762074 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.762181 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-config-data\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.762234 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-scripts\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.762263 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.762316 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.762334 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fd7rv\" (UniqueName: \"kubernetes.io/projected/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-kube-api-access-fd7rv\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.762397 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.766412 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.767067 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.767403 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-scripts\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.768231 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-config-data\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.781816 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fd7rv\" (UniqueName: \"kubernetes.io/projected/ab33c12a-0d2e-4af7-a3a6-4069372a49a6-kube-api-access-fd7rv\") pod \"cinder-scheduler-0\" (UID: \"ab33c12a-0d2e-4af7-a3a6-4069372a49a6\") " pod="openstack/cinder-scheduler-0" Dec 10 00:56:05 crc kubenswrapper[4884]: I1210 00:56:05.960872 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 10 00:56:06 crc kubenswrapper[4884]: I1210 00:56:06.489780 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 10 00:56:06 crc kubenswrapper[4884]: I1210 00:56:06.568949 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ab33c12a-0d2e-4af7-a3a6-4069372a49a6","Type":"ContainerStarted","Data":"573267482ccacf86b9597f92e2fe91e760b5e6b8537d532ea58f06f097be8104"} Dec 10 00:56:07 crc kubenswrapper[4884]: I1210 00:56:07.325360 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40f84f97-adf7-479c-b0b1-671edb6e25d0" path="/var/lib/kubelet/pods/40f84f97-adf7-479c-b0b1-671edb6e25d0/volumes" Dec 10 00:56:07 crc kubenswrapper[4884]: I1210 00:56:07.588379 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ab33c12a-0d2e-4af7-a3a6-4069372a49a6","Type":"ContainerStarted","Data":"e52079703f5603c503aefde864e7a748bbaccd745f69852144ac324c0290dcb6"} Dec 10 00:56:08 crc kubenswrapper[4884]: I1210 00:56:08.471019 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-78b6b775cc-bbrl7" Dec 10 00:56:08 crc kubenswrapper[4884]: I1210 00:56:08.635766 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"ab33c12a-0d2e-4af7-a3a6-4069372a49a6","Type":"ContainerStarted","Data":"5706ec290e4b72a69224663cb6a5b21ac67c0600938def00adb8686a4a7dc941"} Dec 10 00:56:08 crc kubenswrapper[4884]: I1210 00:56:08.648576 4884 generic.go:334] "Generic (PLEG): container finished" podID="1604e8e1-2382-4aa4-b645-332d4c1c00b1" containerID="58071ff80b4d2d77aeda25b3bc4d5e8a137b5952afc30e3397a96bfec551af6e" exitCode=0 Dec 10 00:56:08 crc kubenswrapper[4884]: I1210 00:56:08.648616 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7c6d9d796d-n2w9w" event={"ID":"1604e8e1-2382-4aa4-b645-332d4c1c00b1","Type":"ContainerDied","Data":"58071ff80b4d2d77aeda25b3bc4d5e8a137b5952afc30e3397a96bfec551af6e"} Dec 10 00:56:08 crc kubenswrapper[4884]: I1210 00:56:08.698933 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.698910127 podStartE2EDuration="3.698910127s" podCreationTimestamp="2025-12-10 00:56:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:08.685766951 +0000 UTC m=+1541.763724068" watchObservedRunningTime="2025-12-10 00:56:08.698910127 +0000 UTC m=+1541.776867244" Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.259008 4884 util.go:48] "No ready sandbox 
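The pod_startup_latency_tracker entry above reports podStartSLOduration=3.698910127. With both image-pull timestamps zeroed (the image was already present), this is simply watchObservedRunningTime minus podCreationTimestamp. A small check of that arithmetic, with the timestamps copied from the entry and the monotonic "m=+..." suffixes dropped:

```go
// startup_slo.go - reproduce the podStartSLOduration arithmetic from the
// pod_startup_latency_tracker entry above.
package main

import (
	"fmt"
	"time"
)

func mustParse(v string) time.Time {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	t, err := time.Parse(layout, v)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2025-12-10 00:56:05 +0000 UTC")          // podCreationTimestamp
	running := mustParse("2025-12-10 00:56:08.698910127 +0000 UTC") // watchObservedRunningTime
	fmt.Println(running.Sub(created).Seconds()) // 3.698910127, matching the log
}
```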
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.358609 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-httpd-config\") pod \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") "
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.358715 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-config\") pod \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") "
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.358744 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-combined-ca-bundle\") pod \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") "
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.358859 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-ovndb-tls-certs\") pod \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") "
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.358894 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgm9s\" (UniqueName: \"kubernetes.io/projected/1604e8e1-2382-4aa4-b645-332d4c1c00b1-kube-api-access-jgm9s\") pod \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\" (UID: \"1604e8e1-2382-4aa4-b645-332d4c1c00b1\") "
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.375554 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "1604e8e1-2382-4aa4-b645-332d4c1c00b1" (UID: "1604e8e1-2382-4aa4-b645-332d4c1c00b1"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.375702 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1604e8e1-2382-4aa4-b645-332d4c1c00b1-kube-api-access-jgm9s" (OuterVolumeSpecName: "kube-api-access-jgm9s") pod "1604e8e1-2382-4aa4-b645-332d4c1c00b1" (UID: "1604e8e1-2382-4aa4-b645-332d4c1c00b1"). InnerVolumeSpecName "kube-api-access-jgm9s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.436915 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-config" (OuterVolumeSpecName: "config") pod "1604e8e1-2382-4aa4-b645-332d4c1c00b1" (UID: "1604e8e1-2382-4aa4-b645-332d4c1c00b1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.437610 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1604e8e1-2382-4aa4-b645-332d4c1c00b1" (UID: "1604e8e1-2382-4aa4-b645-332d4c1c00b1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.446759 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "1604e8e1-2382-4aa4-b645-332d4c1c00b1" (UID: "1604e8e1-2382-4aa4-b645-332d4c1c00b1"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.463275 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.463478 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.463564 4884 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.463630 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgm9s\" (UniqueName: \"kubernetes.io/projected/1604e8e1-2382-4aa4-b645-332d4c1c00b1-kube-api-access-jgm9s\") on node \"crc\" DevicePath \"\""
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.463688 4884 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1604e8e1-2382-4aa4-b645-332d4c1c00b1-httpd-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.618867 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7844c4d96d-4v75g"
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.662473 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7844c4d96d-4v75g"
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.669365 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7c6d9d796d-n2w9w" event={"ID":"1604e8e1-2382-4aa4-b645-332d4c1c00b1","Type":"ContainerDied","Data":"472a3b9222b9953e133d9f48d487da9eef1ea8d8a3a3af82e825c5e764db4f49"}
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.669426 4884 scope.go:117] "RemoveContainer" containerID="99e32a4abaf2fd173eee66ec1de614db4a9ee006b9e7358e15f01402dca40202"
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.669576 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7c6d9d796d-n2w9w"
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.715151 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7c6d9d796d-n2w9w"]
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.736660 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7c6d9d796d-n2w9w"]
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.745600 4884 scope.go:117] "RemoveContainer" containerID="58071ff80b4d2d77aeda25b3bc4d5e8a137b5952afc30e3397a96bfec551af6e"
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.755771 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-858b44c594-bz25h"]
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.755990 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-858b44c594-bz25h" podUID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" containerName="barbican-api-log" containerID="cri-o://3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7" gracePeriod=30
Dec 10 00:56:09 crc kubenswrapper[4884]: I1210 00:56:09.756559 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-858b44c594-bz25h" podUID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" containerName="barbican-api" containerID="cri-o://12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39" gracePeriod=30
Dec 10 00:56:10 crc kubenswrapper[4884]: I1210 00:56:10.369158 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Dec 10 00:56:10 crc kubenswrapper[4884]: I1210 00:56:10.688989 4884 generic.go:334] "Generic (PLEG): container finished" podID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" containerID="3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7" exitCode=143
Dec 10 00:56:10 crc kubenswrapper[4884]: I1210 00:56:10.689069 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-858b44c594-bz25h" event={"ID":"2697b83e-e631-4daa-bbcd-eba9dbb4762a","Type":"ContainerDied","Data":"3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7"}
Dec 10 00:56:10 crc kubenswrapper[4884]: I1210 00:56:10.961475 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.027628 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Dec 10 00:56:11 crc kubenswrapper[4884]: E1210 00:56:11.028010 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1604e8e1-2382-4aa4-b645-332d4c1c00b1" containerName="neutron-api"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.028026 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1604e8e1-2382-4aa4-b645-332d4c1c00b1" containerName="neutron-api"
Dec 10 00:56:11 crc kubenswrapper[4884]: E1210 00:56:11.028041 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1604e8e1-2382-4aa4-b645-332d4c1c00b1" containerName="neutron-httpd"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.028048 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1604e8e1-2382-4aa4-b645-332d4c1c00b1" containerName="neutron-httpd"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.028243 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1604e8e1-2382-4aa4-b645-332d4c1c00b1" containerName="neutron-api"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.028266 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1604e8e1-2382-4aa4-b645-332d4c1c00b1" containerName="neutron-httpd"
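The SyncLoop DELETE above makes the kubelet kill the barbican-api containers with gracePeriod=30, and the barbican-api-log container then exits with code 143, i.e. 128 + 15 (SIGTERM), the normal result of a graceful stop. A sketch of the API-side delete that starts this sequence, assuming a kubeconfig at /root/.kube/config (the path is an assumption; namespace and pod name come from the log):

```go
// graceful_delete.go - delete a pod with an explicit grace period, the
// API-side counterpart of the "Killing container with a grace period"
// kubelet entries above. This is a sketch, not the kubelet's own code path.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config") // assumed path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	grace := int64(30) // matches gracePeriod=30 in the log
	err = client.CoreV1().Pods("openstack").Delete(context.TODO(),
		"barbican-api-858b44c594-bz25h",
		metav1.DeleteOptions{GracePeriodSeconds: &grace})
	if err != nil {
		panic(err)
	}
	fmt.Println("delete accepted; kubelet sends SIGTERM, then SIGKILL after 30s")
}
```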
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.028994 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.034351 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.034424 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-wx68l"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.034595 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.065549 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.117700 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776e0297-376b-4a69-aefb-8ed08cf6e8ea-combined-ca-bundle\") pod \"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.117993 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/776e0297-376b-4a69-aefb-8ed08cf6e8ea-openstack-config\") pod \"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.118059 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/776e0297-376b-4a69-aefb-8ed08cf6e8ea-openstack-config-secret\") pod \"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.118134 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggpkx\" (UniqueName: \"kubernetes.io/projected/776e0297-376b-4a69-aefb-8ed08cf6e8ea-kube-api-access-ggpkx\") pod \"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.219976 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776e0297-376b-4a69-aefb-8ed08cf6e8ea-combined-ca-bundle\") pod \"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.220028 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/776e0297-376b-4a69-aefb-8ed08cf6e8ea-openstack-config\") pod \"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.220082 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/776e0297-376b-4a69-aefb-8ed08cf6e8ea-openstack-config-secret\") pod \"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.220173 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggpkx\" (UniqueName: \"kubernetes.io/projected/776e0297-376b-4a69-aefb-8ed08cf6e8ea-kube-api-access-ggpkx\") pod \"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.221076 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/776e0297-376b-4a69-aefb-8ed08cf6e8ea-openstack-config\") pod \"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient"
Dec 10 00:56:11 crc kubenswrapper[4884]: E1210 00:56:11.221844 4884 projected.go:194] Error preparing data for projected volume kube-api-access-ggpkx for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: User "system:node:crc" cannot create resource "serviceaccounts/token" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object
Dec 10 00:56:11 crc kubenswrapper[4884]: E1210 00:56:11.221912 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/776e0297-376b-4a69-aefb-8ed08cf6e8ea-kube-api-access-ggpkx podName:776e0297-376b-4a69-aefb-8ed08cf6e8ea nodeName:}" failed. No retries permitted until 2025-12-10 00:56:11.721892881 +0000 UTC m=+1544.799849988 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-ggpkx" (UniqueName: "kubernetes.io/projected/776e0297-376b-4a69-aefb-8ed08cf6e8ea-kube-api-access-ggpkx") pod "openstackclient" (UID: "776e0297-376b-4a69-aefb-8ed08cf6e8ea") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: User "system:node:crc" cannot create resource "serviceaccounts/token" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.237728 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"]
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.237799 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"]
Dec 10 00:56:11 crc kubenswrapper[4884]: E1210 00:56:11.238626 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle kube-api-access-ggpkx openstack-config-secret], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/openstackclient" podUID="776e0297-376b-4a69-aefb-8ed08cf6e8ea"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.250838 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/776e0297-376b-4a69-aefb-8ed08cf6e8ea-openstack-config-secret\") pod \"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient"
Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.251716 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776e0297-376b-4a69-aefb-8ed08cf6e8ea-combined-ca-bundle\") pod \"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient"
\"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.257642 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.259569 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.271231 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.301943 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1604e8e1-2382-4aa4-b645-332d4c1c00b1" path="/var/lib/kubelet/pods/1604e8e1-2382-4aa4-b645-332d4c1c00b1/volumes" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.321818 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b215a749-e579-4854-a2f7-ebaf6c3416a8-openstack-config-secret\") pod \"openstackclient\" (UID: \"b215a749-e579-4854-a2f7-ebaf6c3416a8\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.322013 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp8sx\" (UniqueName: \"kubernetes.io/projected/b215a749-e579-4854-a2f7-ebaf6c3416a8-kube-api-access-rp8sx\") pod \"openstackclient\" (UID: \"b215a749-e579-4854-a2f7-ebaf6c3416a8\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.322133 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b215a749-e579-4854-a2f7-ebaf6c3416a8-openstack-config\") pod \"openstackclient\" (UID: \"b215a749-e579-4854-a2f7-ebaf6c3416a8\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.322237 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b215a749-e579-4854-a2f7-ebaf6c3416a8-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b215a749-e579-4854-a2f7-ebaf6c3416a8\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.424555 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp8sx\" (UniqueName: \"kubernetes.io/projected/b215a749-e579-4854-a2f7-ebaf6c3416a8-kube-api-access-rp8sx\") pod \"openstackclient\" (UID: \"b215a749-e579-4854-a2f7-ebaf6c3416a8\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.424664 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b215a749-e579-4854-a2f7-ebaf6c3416a8-openstack-config\") pod \"openstackclient\" (UID: \"b215a749-e579-4854-a2f7-ebaf6c3416a8\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.424710 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b215a749-e579-4854-a2f7-ebaf6c3416a8-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b215a749-e579-4854-a2f7-ebaf6c3416a8\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 
00:56:11.424772 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b215a749-e579-4854-a2f7-ebaf6c3416a8-openstack-config-secret\") pod \"openstackclient\" (UID: \"b215a749-e579-4854-a2f7-ebaf6c3416a8\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.425622 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b215a749-e579-4854-a2f7-ebaf6c3416a8-openstack-config\") pod \"openstackclient\" (UID: \"b215a749-e579-4854-a2f7-ebaf6c3416a8\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.429012 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b215a749-e579-4854-a2f7-ebaf6c3416a8-openstack-config-secret\") pod \"openstackclient\" (UID: \"b215a749-e579-4854-a2f7-ebaf6c3416a8\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.432145 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b215a749-e579-4854-a2f7-ebaf6c3416a8-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b215a749-e579-4854-a2f7-ebaf6c3416a8\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.450417 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp8sx\" (UniqueName: \"kubernetes.io/projected/b215a749-e579-4854-a2f7-ebaf6c3416a8-kube-api-access-rp8sx\") pod \"openstackclient\" (UID: \"b215a749-e579-4854-a2f7-ebaf6c3416a8\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.638239 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.697670 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.713470 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.717579 4884 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="776e0297-376b-4a69-aefb-8ed08cf6e8ea" podUID="b215a749-e579-4854-a2f7-ebaf6c3416a8" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.729570 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggpkx\" (UniqueName: \"kubernetes.io/projected/776e0297-376b-4a69-aefb-8ed08cf6e8ea-kube-api-access-ggpkx\") pod \"openstackclient\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " pod="openstack/openstackclient" Dec 10 00:56:11 crc kubenswrapper[4884]: E1210 00:56:11.733692 4884 projected.go:194] Error preparing data for projected volume kube-api-access-ggpkx for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (776e0297-376b-4a69-aefb-8ed08cf6e8ea) does not match the UID in record. 
The object might have been deleted and then recreated Dec 10 00:56:11 crc kubenswrapper[4884]: E1210 00:56:11.733762 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/776e0297-376b-4a69-aefb-8ed08cf6e8ea-kube-api-access-ggpkx podName:776e0297-376b-4a69-aefb-8ed08cf6e8ea nodeName:}" failed. No retries permitted until 2025-12-10 00:56:12.733743173 +0000 UTC m=+1545.811700290 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-ggpkx" (UniqueName: "kubernetes.io/projected/776e0297-376b-4a69-aefb-8ed08cf6e8ea-kube-api-access-ggpkx") pod "openstackclient" (UID: "776e0297-376b-4a69-aefb-8ed08cf6e8ea") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (776e0297-376b-4a69-aefb-8ed08cf6e8ea) does not match the UID in record. The object might have been deleted and then recreated Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.830377 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/776e0297-376b-4a69-aefb-8ed08cf6e8ea-openstack-config-secret\") pod \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.830810 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776e0297-376b-4a69-aefb-8ed08cf6e8ea-combined-ca-bundle\") pod \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.830935 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/776e0297-376b-4a69-aefb-8ed08cf6e8ea-openstack-config\") pod \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\" (UID: \"776e0297-376b-4a69-aefb-8ed08cf6e8ea\") " Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.831582 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/776e0297-376b-4a69-aefb-8ed08cf6e8ea-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "776e0297-376b-4a69-aefb-8ed08cf6e8ea" (UID: "776e0297-376b-4a69-aefb-8ed08cf6e8ea"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.831880 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ggpkx\" (UniqueName: \"kubernetes.io/projected/776e0297-376b-4a69-aefb-8ed08cf6e8ea-kube-api-access-ggpkx\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.831900 4884 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/776e0297-376b-4a69-aefb-8ed08cf6e8ea-openstack-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.836166 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/776e0297-376b-4a69-aefb-8ed08cf6e8ea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "776e0297-376b-4a69-aefb-8ed08cf6e8ea" (UID: "776e0297-376b-4a69-aefb-8ed08cf6e8ea"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.836656 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/776e0297-376b-4a69-aefb-8ed08cf6e8ea-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "776e0297-376b-4a69-aefb-8ed08cf6e8ea" (UID: "776e0297-376b-4a69-aefb-8ed08cf6e8ea"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.933919 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776e0297-376b-4a69-aefb-8ed08cf6e8ea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:11 crc kubenswrapper[4884]: I1210 00:56:11.933953 4884 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/776e0297-376b-4a69-aefb-8ed08cf6e8ea-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:12 crc kubenswrapper[4884]: I1210 00:56:12.102310 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Dec 10 00:56:12 crc kubenswrapper[4884]: I1210 00:56:12.710274 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"b215a749-e579-4854-a2f7-ebaf6c3416a8","Type":"ContainerStarted","Data":"b7f63777838dd946b6c0dfe166462b3393a695eef740a16f36847b379d4f1c16"} Dec 10 00:56:12 crc kubenswrapper[4884]: I1210 00:56:12.710276 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Dec 10 00:56:12 crc kubenswrapper[4884]: I1210 00:56:12.723690 4884 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="776e0297-376b-4a69-aefb-8ed08cf6e8ea" podUID="b215a749-e579-4854-a2f7-ebaf6c3416a8" Dec 10 00:56:12 crc kubenswrapper[4884]: I1210 00:56:12.919522 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-858b44c594-bz25h" podUID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.183:9311/healthcheck\": read tcp 10.217.0.2:52814->10.217.0.183:9311: read: connection reset by peer" Dec 10 00:56:12 crc kubenswrapper[4884]: I1210 00:56:12.919529 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-858b44c594-bz25h" podUID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.183:9311/healthcheck\": read tcp 10.217.0.2:52798->10.217.0.183:9311: read: connection reset by peer" Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.312300 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="776e0297-376b-4a69-aefb-8ed08cf6e8ea" path="/var/lib/kubelet/pods/776e0297-376b-4a69-aefb-8ed08cf6e8ea/volumes" Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.424355 4884 util.go:48] "No ready sandbox for pod can be found. 
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.568322 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-config-data\") pod \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") "
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.568707 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2697b83e-e631-4daa-bbcd-eba9dbb4762a-logs\") pod \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") "
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.568812 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-config-data-custom\") pod \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") "
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.568895 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4tkk\" (UniqueName: \"kubernetes.io/projected/2697b83e-e631-4daa-bbcd-eba9dbb4762a-kube-api-access-n4tkk\") pod \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") "
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.569038 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-combined-ca-bundle\") pod \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\" (UID: \"2697b83e-e631-4daa-bbcd-eba9dbb4762a\") "
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.569376 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2697b83e-e631-4daa-bbcd-eba9dbb4762a-logs" (OuterVolumeSpecName: "logs") pod "2697b83e-e631-4daa-bbcd-eba9dbb4762a" (UID: "2697b83e-e631-4daa-bbcd-eba9dbb4762a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.569617 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2697b83e-e631-4daa-bbcd-eba9dbb4762a-logs\") on node \"crc\" DevicePath \"\""
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.579573 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2697b83e-e631-4daa-bbcd-eba9dbb4762a-kube-api-access-n4tkk" (OuterVolumeSpecName: "kube-api-access-n4tkk") pod "2697b83e-e631-4daa-bbcd-eba9dbb4762a" (UID: "2697b83e-e631-4daa-bbcd-eba9dbb4762a"). InnerVolumeSpecName "kube-api-access-n4tkk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.606479 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2697b83e-e631-4daa-bbcd-eba9dbb4762a" (UID: "2697b83e-e631-4daa-bbcd-eba9dbb4762a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.610558 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2697b83e-e631-4daa-bbcd-eba9dbb4762a" (UID: "2697b83e-e631-4daa-bbcd-eba9dbb4762a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.643541 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-config-data" (OuterVolumeSpecName: "config-data") pod "2697b83e-e631-4daa-bbcd-eba9dbb4762a" (UID: "2697b83e-e631-4daa-bbcd-eba9dbb4762a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.674811 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-config-data\") on node \"crc\" DevicePath \"\""
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.674842 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-config-data-custom\") on node \"crc\" DevicePath \"\""
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.674856 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4tkk\" (UniqueName: \"kubernetes.io/projected/2697b83e-e631-4daa-bbcd-eba9dbb4762a-kube-api-access-n4tkk\") on node \"crc\" DevicePath \"\""
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.674868 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2697b83e-e631-4daa-bbcd-eba9dbb4762a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.733152 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-858b44c594-bz25h"
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.733188 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-858b44c594-bz25h" event={"ID":"2697b83e-e631-4daa-bbcd-eba9dbb4762a","Type":"ContainerDied","Data":"12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39"}
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.733266 4884 scope.go:117] "RemoveContainer" containerID="12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39"
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.733524 4884 generic.go:334] "Generic (PLEG): container finished" podID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" containerID="12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39" exitCode=0
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.734046 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-858b44c594-bz25h" event={"ID":"2697b83e-e631-4daa-bbcd-eba9dbb4762a","Type":"ContainerDied","Data":"ca4dbbaaccf4feab827efd763393ff340553ad3b2bd2e9545229612c1a641126"}
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.775905 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-858b44c594-bz25h"]
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.795242 4884 scope.go:117] "RemoveContainer" containerID="3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7"
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.819940 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-858b44c594-bz25h"]
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.848740 4884 scope.go:117] "RemoveContainer" containerID="12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39"
Dec 10 00:56:13 crc kubenswrapper[4884]: E1210 00:56:13.849644 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39\": container with ID starting with 12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39 not found: ID does not exist" containerID="12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39"
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.849676 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39"} err="failed to get container status \"12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39\": rpc error: code = NotFound desc = could not find container \"12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39\": container with ID starting with 12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39 not found: ID does not exist"
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.849699 4884 scope.go:117] "RemoveContainer" containerID="3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7"
Dec 10 00:56:13 crc kubenswrapper[4884]: E1210 00:56:13.849918 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7\": container with ID starting with 3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7 not found: ID does not exist" containerID="3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7"
Dec 10 00:56:13 crc kubenswrapper[4884]: I1210 00:56:13.849937 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7"} err="failed to get container status \"3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7\": rpc error: code = NotFound desc = could not find container \"3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7\": container with ID starting with 3720ddf1c0743454a38456c8d16078a90405637ffc944b3a7cb4d09bb21c21f7 not found: ID does not exist"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.448738 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-c95474448-hcdj6"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.455565 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-58c87ffb89-4m2v2"]
Dec 10 00:56:14 crc kubenswrapper[4884]: E1210 00:56:14.456006 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" containerName="barbican-api-log"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.456020 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" containerName="barbican-api-log"
Dec 10 00:56:14 crc kubenswrapper[4884]: E1210 00:56:14.456040 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" containerName="barbican-api"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.456047 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" containerName="barbican-api"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.456255 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" containerName="barbican-api"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.456266 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" containerName="barbican-api-log"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.456950 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-58c87ffb89-4m2v2"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.460470 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.460685 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.460810 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-9kwc7"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.467635 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-58c87ffb89-4m2v2"]
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.551557 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-79b9f4dd9f-25vz6"]
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.552981 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6"
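The ContainerStatus/DeleteContainer errors above are gRPC NotFound responses from CRI-O for containers that are already gone; the kubelet logs them and moves on, since during cleanup NotFound just means the work is already done. A generic sketch of that idempotency pattern, with a constructed error standing in for a real CRI call:

```go
// not_found.go - treat gRPC NotFound as success during cleanup, the
// pattern implied by the "DeleteContainer returned error" entries above.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ignoreNotFound swallows gRPC NotFound, making delete-style cleanup
// idempotent: a second removal of the same container is not a failure.
func ignoreNotFound(err error) error {
	if status.Code(err) == codes.NotFound {
		return nil
	}
	return err
}

func main() {
	err := status.Error(codes.NotFound,
		`could not find container "12cefbb44c40cb36415dd393175d9d247f7778e5fab8e052c01e11ab45a50b39"`)
	fmt.Println("raw:", err)
	fmt.Println("after ignoreNotFound:", ignoreNotFound(err)) // <nil>
}
```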
Need to start a new one" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.556751 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.564382 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-79b9f4dd9f-25vz6"] Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.593512 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-config-data\") pod \"heat-engine-58c87ffb89-4m2v2\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.593565 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-combined-ca-bundle\") pod \"heat-engine-58c87ffb89-4m2v2\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.593651 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gd6c\" (UniqueName: \"kubernetes.io/projected/9eff8542-e352-4f68-bb2b-aa02df1e06f8-kube-api-access-2gd6c\") pod \"heat-engine-58c87ffb89-4m2v2\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.593696 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-config-data-custom\") pod \"heat-engine-58c87ffb89-4m2v2\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.607085 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-c95474448-hcdj6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.642238 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-8c56c4785-52p9j"] Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.643489 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.647358 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.669688 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-8c56c4785-52p9j"] Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.701623 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-combined-ca-bundle\") pod \"heat-engine-58c87ffb89-4m2v2\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.701720 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz26l\" (UniqueName: \"kubernetes.io/projected/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-kube-api-access-lz26l\") pod \"heat-cfnapi-79b9f4dd9f-25vz6\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.701781 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-config-data-custom\") pod \"heat-cfnapi-79b9f4dd9f-25vz6\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.701807 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gd6c\" (UniqueName: \"kubernetes.io/projected/9eff8542-e352-4f68-bb2b-aa02df1e06f8-kube-api-access-2gd6c\") pod \"heat-engine-58c87ffb89-4m2v2\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.701833 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-config-data\") pod \"heat-cfnapi-79b9f4dd9f-25vz6\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.701909 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-config-data-custom\") pod \"heat-engine-58c87ffb89-4m2v2\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.704842 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-combined-ca-bundle\") pod \"heat-cfnapi-79b9f4dd9f-25vz6\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.704985 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-config-data\") pod \"heat-engine-58c87ffb89-4m2v2\" (UID: 
\"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.710498 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-config-data-custom\") pod \"heat-engine-58c87ffb89-4m2v2\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.712080 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-combined-ca-bundle\") pod \"heat-engine-58c87ffb89-4m2v2\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.723423 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gd6c\" (UniqueName: \"kubernetes.io/projected/9eff8542-e352-4f68-bb2b-aa02df1e06f8-kube-api-access-2gd6c\") pod \"heat-engine-58c87ffb89-4m2v2\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.726216 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-config-data\") pod \"heat-engine-58c87ffb89-4m2v2\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.726282 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-d5kx9"] Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.727979 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.734183 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-d5kx9"] Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.797626 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.807033 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-dns-swift-storage-0\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.807124 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-config-data\") pod \"heat-api-8c56c4785-52p9j\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.807161 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-combined-ca-bundle\") pod \"heat-cfnapi-79b9f4dd9f-25vz6\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.807196 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d9jk\" (UniqueName: \"kubernetes.io/projected/9ea81429-d4fc-42e1-b626-dfa345c7f698-kube-api-access-5d9jk\") pod \"heat-api-8c56c4785-52p9j\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.807212 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-ovsdbserver-sb\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.807237 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-ovsdbserver-nb\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.807270 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-config\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.807297 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz26l\" (UniqueName: \"kubernetes.io/projected/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-kube-api-access-lz26l\") pod \"heat-cfnapi-79b9f4dd9f-25vz6\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.807328 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-dns-svc\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.807474 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-combined-ca-bundle\") pod \"heat-api-8c56c4785-52p9j\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.807955 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2hmp\" (UniqueName: \"kubernetes.io/projected/966092e4-bb27-4e13-97e1-46f55c562a7f-kube-api-access-d2hmp\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.807981 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-config-data-custom\") pod \"heat-cfnapi-79b9f4dd9f-25vz6\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.808020 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-config-data\") pod \"heat-cfnapi-79b9f4dd9f-25vz6\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.808112 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-config-data-custom\") pod \"heat-api-8c56c4785-52p9j\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.814866 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-config-data-custom\") pod \"heat-cfnapi-79b9f4dd9f-25vz6\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.815762 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-combined-ca-bundle\") pod \"heat-cfnapi-79b9f4dd9f-25vz6\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.823150 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-config-data\") pod \"heat-cfnapi-79b9f4dd9f-25vz6\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.832677 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz26l\" (UniqueName: 
\"kubernetes.io/projected/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-kube-api-access-lz26l\") pod \"heat-cfnapi-79b9f4dd9f-25vz6\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.895090 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.917622 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2hmp\" (UniqueName: \"kubernetes.io/projected/966092e4-bb27-4e13-97e1-46f55c562a7f-kube-api-access-d2hmp\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.917848 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-config-data-custom\") pod \"heat-api-8c56c4785-52p9j\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.917913 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-dns-swift-storage-0\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.918006 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-config-data\") pod \"heat-api-8c56c4785-52p9j\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.918082 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d9jk\" (UniqueName: \"kubernetes.io/projected/9ea81429-d4fc-42e1-b626-dfa345c7f698-kube-api-access-5d9jk\") pod \"heat-api-8c56c4785-52p9j\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.918097 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-ovsdbserver-sb\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.918145 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-ovsdbserver-nb\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.918208 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-config\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: 
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.918263 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-dns-svc\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.918291 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-combined-ca-bundle\") pod \"heat-api-8c56c4785-52p9j\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " pod="openstack/heat-api-8c56c4785-52p9j"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.919667 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-dns-swift-storage-0\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.920324 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-ovsdbserver-nb\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.920331 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-config\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.920922 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-ovsdbserver-sb\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.921171 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-dns-svc\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.924733 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-config-data-custom\") pod \"heat-api-8c56c4785-52p9j\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " pod="openstack/heat-api-8c56c4785-52p9j"
Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.928552 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-config-data\") pod \"heat-api-8c56c4785-52p9j\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " pod="openstack/heat-api-8c56c4785-52p9j"
\"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-combined-ca-bundle\") pod \"heat-api-8c56c4785-52p9j\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.943163 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d9jk\" (UniqueName: \"kubernetes.io/projected/9ea81429-d4fc-42e1-b626-dfa345c7f698-kube-api-access-5d9jk\") pod \"heat-api-8c56c4785-52p9j\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.943168 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2hmp\" (UniqueName: \"kubernetes.io/projected/966092e4-bb27-4e13-97e1-46f55c562a7f-kube-api-access-d2hmp\") pod \"dnsmasq-dns-688b9f5b49-d5kx9\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") " pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:14 crc kubenswrapper[4884]: I1210 00:56:14.997978 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:15 crc kubenswrapper[4884]: I1210 00:56:15.229260 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:15 crc kubenswrapper[4884]: I1210 00:56:15.307512 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2697b83e-e631-4daa-bbcd-eba9dbb4762a" path="/var/lib/kubelet/pods/2697b83e-e631-4daa-bbcd-eba9dbb4762a/volumes" Dec 10 00:56:15 crc kubenswrapper[4884]: I1210 00:56:15.406390 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-58c87ffb89-4m2v2"] Dec 10 00:56:15 crc kubenswrapper[4884]: I1210 00:56:15.530850 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-79b9f4dd9f-25vz6"] Dec 10 00:56:15 crc kubenswrapper[4884]: I1210 00:56:15.753023 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-8c56c4785-52p9j"] Dec 10 00:56:15 crc kubenswrapper[4884]: W1210 00:56:15.761298 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ea81429_d4fc_42e1_b626_dfa345c7f698.slice/crio-2dad861742e8cfc70e682f33a138f0831ab59f0ac6507f7cac4bfe9e9175b104 WatchSource:0}: Error finding container 2dad861742e8cfc70e682f33a138f0831ab59f0ac6507f7cac4bfe9e9175b104: Status 404 returned error can't find the container with id 2dad861742e8cfc70e682f33a138f0831ab59f0ac6507f7cac4bfe9e9175b104 Dec 10 00:56:15 crc kubenswrapper[4884]: I1210 00:56:15.776685 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-58c87ffb89-4m2v2" event={"ID":"9eff8542-e352-4f68-bb2b-aa02df1e06f8","Type":"ContainerStarted","Data":"a161d9c7379232224139d9ad222216f92ed54460dd8444af97e828a61b521372"} Dec 10 00:56:15 crc kubenswrapper[4884]: I1210 00:56:15.778793 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" event={"ID":"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f","Type":"ContainerStarted","Data":"b082e5cf709dc39dba6db9fde4b652e7730968ca0878856bd3029758b6e56ec5"} Dec 10 00:56:15 crc kubenswrapper[4884]: I1210 00:56:15.866370 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-d5kx9"] Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.213730 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openstack/cinder-scheduler-0" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.820744 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-8c56c4785-52p9j" event={"ID":"9ea81429-d4fc-42e1-b626-dfa345c7f698","Type":"ContainerStarted","Data":"2dad861742e8cfc70e682f33a138f0831ab59f0ac6507f7cac4bfe9e9175b104"} Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.825113 4884 generic.go:334] "Generic (PLEG): container finished" podID="966092e4-bb27-4e13-97e1-46f55c562a7f" containerID="9c89aab72737d2173dcd213a3e2c5e5a26d262bc0e3af2013f7ca268400878fd" exitCode=0 Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.825160 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" event={"ID":"966092e4-bb27-4e13-97e1-46f55c562a7f","Type":"ContainerDied","Data":"9c89aab72737d2173dcd213a3e2c5e5a26d262bc0e3af2013f7ca268400878fd"} Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.825178 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" event={"ID":"966092e4-bb27-4e13-97e1-46f55c562a7f","Type":"ContainerStarted","Data":"6882f0fc5675004a5153bccf724fb4554f37aa038a1fcd804b2fd950133f6c70"} Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.829699 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-58c87ffb89-4m2v2" event={"ID":"9eff8542-e352-4f68-bb2b-aa02df1e06f8","Type":"ContainerStarted","Data":"77f7d30e110d99a17d1e200c8fcc890cd15ec0c5fe48e0b7a6db365309eae252"} Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.830548 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.868868 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-58c87ffb89-4m2v2" podStartSLOduration=2.868831205 podStartE2EDuration="2.868831205s" podCreationTimestamp="2025-12-10 00:56:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:16.867856709 +0000 UTC m=+1549.945813836" watchObservedRunningTime="2025-12-10 00:56:16.868831205 +0000 UTC m=+1549.946788322" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.908408 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-8567bf5cf5-sh595"] Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.910239 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.916133 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-8567bf5cf5-sh595"] Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.916532 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.916704 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.918540 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.961909 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9b2136d-c328-4096-b478-ad8836adf843-run-httpd\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.962017 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9b2136d-c328-4096-b478-ad8836adf843-internal-tls-certs\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.962037 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9b2136d-c328-4096-b478-ad8836adf843-public-tls-certs\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.962100 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9b2136d-c328-4096-b478-ad8836adf843-log-httpd\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.962150 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v64tb\" (UniqueName: \"kubernetes.io/projected/b9b2136d-c328-4096-b478-ad8836adf843-kube-api-access-v64tb\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.962192 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b2136d-c328-4096-b478-ad8836adf843-config-data\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.962220 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b9b2136d-c328-4096-b478-ad8836adf843-etc-swift\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " 
pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:16 crc kubenswrapper[4884]: I1210 00:56:16.962252 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b2136d-c328-4096-b478-ad8836adf843-combined-ca-bundle\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.065089 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9b2136d-c328-4096-b478-ad8836adf843-internal-tls-certs\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.065127 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9b2136d-c328-4096-b478-ad8836adf843-public-tls-certs\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.065170 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9b2136d-c328-4096-b478-ad8836adf843-log-httpd\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.065212 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v64tb\" (UniqueName: \"kubernetes.io/projected/b9b2136d-c328-4096-b478-ad8836adf843-kube-api-access-v64tb\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.065255 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b2136d-c328-4096-b478-ad8836adf843-config-data\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.065284 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b9b2136d-c328-4096-b478-ad8836adf843-etc-swift\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.065311 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b2136d-c328-4096-b478-ad8836adf843-combined-ca-bundle\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.065336 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9b2136d-c328-4096-b478-ad8836adf843-run-httpd\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " 
pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.065928 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9b2136d-c328-4096-b478-ad8836adf843-run-httpd\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.072043 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b9b2136d-c328-4096-b478-ad8836adf843-log-httpd\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.079635 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9b2136d-c328-4096-b478-ad8836adf843-public-tls-certs\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.084157 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9b2136d-c328-4096-b478-ad8836adf843-combined-ca-bundle\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.084389 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9b2136d-c328-4096-b478-ad8836adf843-config-data\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.085149 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/b9b2136d-c328-4096-b478-ad8836adf843-etc-swift\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.085280 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9b2136d-c328-4096-b478-ad8836adf843-internal-tls-certs\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.096486 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v64tb\" (UniqueName: \"kubernetes.io/projected/b9b2136d-c328-4096-b478-ad8836adf843-kube-api-access-v64tb\") pod \"swift-proxy-8567bf5cf5-sh595\" (UID: \"b9b2136d-c328-4096-b478-ad8836adf843\") " pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:17 crc kubenswrapper[4884]: I1210 00:56:17.285047 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.098514 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.098907 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.098968 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.099926 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.100003 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" gracePeriod=600 Dec 10 00:56:18 crc kubenswrapper[4884]: E1210 00:56:18.267498 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.868860 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" exitCode=0 Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.869108 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957"} Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.869468 4884 scope.go:117] "RemoveContainer" containerID="c301699d2f400451cf0d2a7c3e824313cd2be40d6ee80bb600b0e9f8df69938a" Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.870133 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:56:18 crc kubenswrapper[4884]: E1210 00:56:18.870510 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
Dec 10 00:56:18 crc kubenswrapper[4884]: E1210 00:56:18.870510 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.877694 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" event={"ID":"966092e4-bb27-4e13-97e1-46f55c562a7f","Type":"ContainerStarted","Data":"db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09"}
Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.878564 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9"
Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.896728 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-8c56c4785-52p9j" event={"ID":"9ea81429-d4fc-42e1-b626-dfa345c7f698","Type":"ContainerStarted","Data":"d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602"}
Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.897540 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-8c56c4785-52p9j"
Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.913714 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" event={"ID":"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f","Type":"ContainerStarted","Data":"a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707"}
Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.948851 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6"
Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.923920 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" podStartSLOduration=4.923901158 podStartE2EDuration="4.923901158s" podCreationTimestamp="2025-12-10 00:56:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:18.920263039 +0000 UTC m=+1551.998220176" watchObservedRunningTime="2025-12-10 00:56:18.923901158 +0000 UTC m=+1552.001858275"
Dec 10 00:56:18 crc kubenswrapper[4884]: I1210 00:56:18.992425 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-8c56c4785-52p9j" podStartSLOduration=2.452459949 podStartE2EDuration="4.992406973s" podCreationTimestamp="2025-12-10 00:56:14 +0000 UTC" firstStartedPulling="2025-12-10 00:56:15.766133513 +0000 UTC m=+1548.844090630" lastFinishedPulling="2025-12-10 00:56:18.306080547 +0000 UTC m=+1551.384037654" observedRunningTime="2025-12-10 00:56:18.961808304 +0000 UTC m=+1552.039765421" watchObservedRunningTime="2025-12-10 00:56:18.992406973 +0000 UTC m=+1552.070364090"
Dec 10 00:56:19 crc kubenswrapper[4884]: I1210 00:56:19.077533 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" podStartSLOduration=2.3454639 podStartE2EDuration="5.077508217s" podCreationTimestamp="2025-12-10 00:56:14 +0000 UTC" firstStartedPulling="2025-12-10 00:56:15.559070125 +0000 UTC m=+1548.637027242" lastFinishedPulling="2025-12-10 00:56:18.291114442 +0000 UTC m=+1551.369071559" observedRunningTime="2025-12-10 00:56:18.980872651 +0000 UTC m=+1552.058829768" watchObservedRunningTime="2025-12-10 00:56:19.077508217 +0000 UTC m=+1552.155465334"
Dec 10 00:56:19 crc kubenswrapper[4884]: I1210 00:56:19.178922 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-8567bf5cf5-sh595"]
Dec 10 00:56:19 crc kubenswrapper[4884]: I1210 00:56:19.927402 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-8567bf5cf5-sh595" event={"ID":"b9b2136d-c328-4096-b478-ad8836adf843","Type":"ContainerStarted","Data":"e1741c4f515038de16ecff3a4fe5b495789ce84c648e5f9b52ffdc72cd1cdcfe"}
Dec 10 00:56:19 crc kubenswrapper[4884]: I1210 00:56:19.927774 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-8567bf5cf5-sh595" event={"ID":"b9b2136d-c328-4096-b478-ad8836adf843","Type":"ContainerStarted","Data":"57199d8e70feac5944bea86eb4628947c47bf58d1f19ee9892e57df7cb9ba174"}
Dec 10 00:56:19 crc kubenswrapper[4884]: I1210 00:56:19.927789 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-8567bf5cf5-sh595" event={"ID":"b9b2136d-c328-4096-b478-ad8836adf843","Type":"ContainerStarted","Data":"53b4cfdd2a2c9a139d1065736b0dc2b1d8c85874f807c64ab89b030232f19fe8"}
Dec 10 00:56:19 crc kubenswrapper[4884]: I1210 00:56:19.928254 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-8567bf5cf5-sh595"
Dec 10 00:56:19 crc kubenswrapper[4884]: I1210 00:56:19.928465 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-8567bf5cf5-sh595"
Dec 10 00:56:19 crc kubenswrapper[4884]: I1210 00:56:19.966037 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-8567bf5cf5-sh595" podStartSLOduration=3.96601397 podStartE2EDuration="3.96601397s" podCreationTimestamp="2025-12-10 00:56:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:19.949913924 +0000 UTC m=+1553.027871061" watchObservedRunningTime="2025-12-10 00:56:19.96601397 +0000 UTC m=+1553.043971097"
Dec 10 00:56:21 crc kubenswrapper[4884]: I1210 00:56:21.966777 4884 generic.go:334] "Generic (PLEG): container finished" podID="341036d7-5d9f-493a-b043-11d6517c390d" containerID="746f6a5a4a296ac79c01aac0deb35cb5815770a4f591e7dd9653c67b78d3ed6d" exitCode=137
Dec 10 00:56:21 crc kubenswrapper[4884]: I1210 00:56:21.966830 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"341036d7-5d9f-493a-b043-11d6517c390d","Type":"ContainerDied","Data":"746f6a5a4a296ac79c01aac0deb35cb5815770a4f591e7dd9653c67b78d3ed6d"}
Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.063797 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-6c876d64f-4c6rh"]
Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.065190 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6c876d64f-4c6rh"
Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.086033 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6c876d64f-4c6rh"]
Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.108790 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-566fc6dbdb-qb9bs"]
Need to start a new one" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.126849 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-5867565df6-2bb6n"] Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.132668 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.164609 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-566fc6dbdb-qb9bs"] Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.174505 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5867565df6-2bb6n"] Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.225192 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-config-data\") pod \"heat-cfnapi-566fc6dbdb-qb9bs\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.225273 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c15434d2-166c-4919-ac7f-99ef8bf909b4-config-data\") pod \"heat-engine-6c876d64f-4c6rh\" (UID: \"c15434d2-166c-4919-ac7f-99ef8bf909b4\") " pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.225322 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-combined-ca-bundle\") pod \"heat-api-5867565df6-2bb6n\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.225381 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt6dx\" (UniqueName: \"kubernetes.io/projected/73959270-9ff8-499d-8316-dd45b701ad6f-kube-api-access-nt6dx\") pod \"heat-cfnapi-566fc6dbdb-qb9bs\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.225420 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c15434d2-166c-4919-ac7f-99ef8bf909b4-config-data-custom\") pod \"heat-engine-6c876d64f-4c6rh\" (UID: \"c15434d2-166c-4919-ac7f-99ef8bf909b4\") " pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.225494 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-config-data-custom\") pod \"heat-api-5867565df6-2bb6n\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.225557 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-config-data\") pod \"heat-api-5867565df6-2bb6n\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " 
pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.225598 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbgwk\" (UniqueName: \"kubernetes.io/projected/c15434d2-166c-4919-ac7f-99ef8bf909b4-kube-api-access-hbgwk\") pod \"heat-engine-6c876d64f-4c6rh\" (UID: \"c15434d2-166c-4919-ac7f-99ef8bf909b4\") " pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.225626 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-combined-ca-bundle\") pod \"heat-cfnapi-566fc6dbdb-qb9bs\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.225667 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8wwl\" (UniqueName: \"kubernetes.io/projected/9baa9257-9a08-427f-bdec-1fa28a81303f-kube-api-access-j8wwl\") pod \"heat-api-5867565df6-2bb6n\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.225702 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c15434d2-166c-4919-ac7f-99ef8bf909b4-combined-ca-bundle\") pod \"heat-engine-6c876d64f-4c6rh\" (UID: \"c15434d2-166c-4919-ac7f-99ef8bf909b4\") " pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.225725 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-config-data-custom\") pod \"heat-cfnapi-566fc6dbdb-qb9bs\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.328245 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-config-data-custom\") pod \"heat-api-5867565df6-2bb6n\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.328330 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-config-data\") pod \"heat-api-5867565df6-2bb6n\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.328364 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbgwk\" (UniqueName: \"kubernetes.io/projected/c15434d2-166c-4919-ac7f-99ef8bf909b4-kube-api-access-hbgwk\") pod \"heat-engine-6c876d64f-4c6rh\" (UID: \"c15434d2-166c-4919-ac7f-99ef8bf909b4\") " pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.328386 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-combined-ca-bundle\") pod 
\"heat-cfnapi-566fc6dbdb-qb9bs\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.328445 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8wwl\" (UniqueName: \"kubernetes.io/projected/9baa9257-9a08-427f-bdec-1fa28a81303f-kube-api-access-j8wwl\") pod \"heat-api-5867565df6-2bb6n\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.328489 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c15434d2-166c-4919-ac7f-99ef8bf909b4-combined-ca-bundle\") pod \"heat-engine-6c876d64f-4c6rh\" (UID: \"c15434d2-166c-4919-ac7f-99ef8bf909b4\") " pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.328515 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-config-data-custom\") pod \"heat-cfnapi-566fc6dbdb-qb9bs\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.328544 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-config-data\") pod \"heat-cfnapi-566fc6dbdb-qb9bs\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.328584 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c15434d2-166c-4919-ac7f-99ef8bf909b4-config-data\") pod \"heat-engine-6c876d64f-4c6rh\" (UID: \"c15434d2-166c-4919-ac7f-99ef8bf909b4\") " pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.328630 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-combined-ca-bundle\") pod \"heat-api-5867565df6-2bb6n\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.328679 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt6dx\" (UniqueName: \"kubernetes.io/projected/73959270-9ff8-499d-8316-dd45b701ad6f-kube-api-access-nt6dx\") pod \"heat-cfnapi-566fc6dbdb-qb9bs\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.328714 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c15434d2-166c-4919-ac7f-99ef8bf909b4-config-data-custom\") pod \"heat-engine-6c876d64f-4c6rh\" (UID: \"c15434d2-166c-4919-ac7f-99ef8bf909b4\") " pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.340763 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c15434d2-166c-4919-ac7f-99ef8bf909b4-config-data\") pod \"heat-engine-6c876d64f-4c6rh\" (UID: 
\"c15434d2-166c-4919-ac7f-99ef8bf909b4\") " pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.345021 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8wwl\" (UniqueName: \"kubernetes.io/projected/9baa9257-9a08-427f-bdec-1fa28a81303f-kube-api-access-j8wwl\") pod \"heat-api-5867565df6-2bb6n\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.346295 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c15434d2-166c-4919-ac7f-99ef8bf909b4-config-data-custom\") pod \"heat-engine-6c876d64f-4c6rh\" (UID: \"c15434d2-166c-4919-ac7f-99ef8bf909b4\") " pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.347265 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-combined-ca-bundle\") pod \"heat-cfnapi-566fc6dbdb-qb9bs\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.347355 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-config-data-custom\") pod \"heat-cfnapi-566fc6dbdb-qb9bs\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.347603 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-config-data\") pod \"heat-api-5867565df6-2bb6n\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.348135 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c15434d2-166c-4919-ac7f-99ef8bf909b4-combined-ca-bundle\") pod \"heat-engine-6c876d64f-4c6rh\" (UID: \"c15434d2-166c-4919-ac7f-99ef8bf909b4\") " pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.348210 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-combined-ca-bundle\") pod \"heat-api-5867565df6-2bb6n\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.348754 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-config-data\") pod \"heat-cfnapi-566fc6dbdb-qb9bs\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.352468 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-config-data-custom\") pod \"heat-api-5867565df6-2bb6n\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc 
kubenswrapper[4884]: I1210 00:56:22.354061 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt6dx\" (UniqueName: \"kubernetes.io/projected/73959270-9ff8-499d-8316-dd45b701ad6f-kube-api-access-nt6dx\") pod \"heat-cfnapi-566fc6dbdb-qb9bs\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.365651 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbgwk\" (UniqueName: \"kubernetes.io/projected/c15434d2-166c-4919-ac7f-99ef8bf909b4-kube-api-access-hbgwk\") pod \"heat-engine-6c876d64f-4c6rh\" (UID: \"c15434d2-166c-4919-ac7f-99ef8bf909b4\") " pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.402361 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.433718 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.453504 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.943950 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-mgp4f"] Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.945343 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-mgp4f" Dec 10 00:56:22 crc kubenswrapper[4884]: I1210 00:56:22.957562 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-mgp4f"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.024961 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-dbzrq"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.027167 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-dbzrq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.038032 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-dbzrq"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.043285 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/16359c7a-73d8-4c9b-bb45-2c81c5475330-operator-scripts\") pod \"nova-api-db-create-mgp4f\" (UID: \"16359c7a-73d8-4c9b-bb45-2c81c5475330\") " pod="openstack/nova-api-db-create-mgp4f" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.043374 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-868mr\" (UniqueName: \"kubernetes.io/projected/16359c7a-73d8-4c9b-bb45-2c81c5475330-kube-api-access-868mr\") pod \"nova-api-db-create-mgp4f\" (UID: \"16359c7a-73d8-4c9b-bb45-2c81c5475330\") " pod="openstack/nova-api-db-create-mgp4f" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.136800 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-f64f-account-create-update-qtqz4"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.138214 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f64f-account-create-update-qtqz4" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.140227 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.145599 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-868mr\" (UniqueName: \"kubernetes.io/projected/16359c7a-73d8-4c9b-bb45-2c81c5475330-kube-api-access-868mr\") pod \"nova-api-db-create-mgp4f\" (UID: \"16359c7a-73d8-4c9b-bb45-2c81c5475330\") " pod="openstack/nova-api-db-create-mgp4f" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.145692 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t47sz\" (UniqueName: \"kubernetes.io/projected/589b62f4-a91b-43b0-b49b-b303dcba8a67-kube-api-access-t47sz\") pod \"nova-cell0-db-create-dbzrq\" (UID: \"589b62f4-a91b-43b0-b49b-b303dcba8a67\") " pod="openstack/nova-cell0-db-create-dbzrq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.145716 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/589b62f4-a91b-43b0-b49b-b303dcba8a67-operator-scripts\") pod \"nova-cell0-db-create-dbzrq\" (UID: \"589b62f4-a91b-43b0-b49b-b303dcba8a67\") " pod="openstack/nova-cell0-db-create-dbzrq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.145802 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/16359c7a-73d8-4c9b-bb45-2c81c5475330-operator-scripts\") pod \"nova-api-db-create-mgp4f\" (UID: \"16359c7a-73d8-4c9b-bb45-2c81c5475330\") " pod="openstack/nova-api-db-create-mgp4f" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.146844 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/16359c7a-73d8-4c9b-bb45-2c81c5475330-operator-scripts\") pod \"nova-api-db-create-mgp4f\" (UID: \"16359c7a-73d8-4c9b-bb45-2c81c5475330\") " pod="openstack/nova-api-db-create-mgp4f" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.163417 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-868mr\" (UniqueName: \"kubernetes.io/projected/16359c7a-73d8-4c9b-bb45-2c81c5475330-kube-api-access-868mr\") pod \"nova-api-db-create-mgp4f\" (UID: \"16359c7a-73d8-4c9b-bb45-2c81c5475330\") " pod="openstack/nova-api-db-create-mgp4f" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.201880 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f64f-account-create-update-qtqz4"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.235102 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-l2wlj"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.236407 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-l2wlj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.248938 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t47sz\" (UniqueName: \"kubernetes.io/projected/589b62f4-a91b-43b0-b49b-b303dcba8a67-kube-api-access-t47sz\") pod \"nova-cell0-db-create-dbzrq\" (UID: \"589b62f4-a91b-43b0-b49b-b303dcba8a67\") " pod="openstack/nova-cell0-db-create-dbzrq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.248996 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/589b62f4-a91b-43b0-b49b-b303dcba8a67-operator-scripts\") pod \"nova-cell0-db-create-dbzrq\" (UID: \"589b62f4-a91b-43b0-b49b-b303dcba8a67\") " pod="openstack/nova-cell0-db-create-dbzrq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.249172 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a5d45a0-51a8-4925-bbba-2b9d42ac9114-operator-scripts\") pod \"nova-api-f64f-account-create-update-qtqz4\" (UID: \"4a5d45a0-51a8-4925-bbba-2b9d42ac9114\") " pod="openstack/nova-api-f64f-account-create-update-qtqz4" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.249208 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqngz\" (UniqueName: \"kubernetes.io/projected/4a5d45a0-51a8-4925-bbba-2b9d42ac9114-kube-api-access-jqngz\") pod \"nova-api-f64f-account-create-update-qtqz4\" (UID: \"4a5d45a0-51a8-4925-bbba-2b9d42ac9114\") " pod="openstack/nova-api-f64f-account-create-update-qtqz4" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.250556 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/589b62f4-a91b-43b0-b49b-b303dcba8a67-operator-scripts\") pod \"nova-cell0-db-create-dbzrq\" (UID: \"589b62f4-a91b-43b0-b49b-b303dcba8a67\") " pod="openstack/nova-cell0-db-create-dbzrq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.263029 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-l2wlj"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.264228 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-mgp4f" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.280673 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t47sz\" (UniqueName: \"kubernetes.io/projected/589b62f4-a91b-43b0-b49b-b303dcba8a67-kube-api-access-t47sz\") pod \"nova-cell0-db-create-dbzrq\" (UID: \"589b62f4-a91b-43b0-b49b-b303dcba8a67\") " pod="openstack/nova-cell0-db-create-dbzrq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.335350 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-321f-account-create-update-sgsmd"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.346241 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-321f-account-create-update-sgsmd"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.346572 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-321f-account-create-update-sgsmd" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.346866 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-dbzrq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.348700 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.350872 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/636dddde-3987-4843-917d-956aedd66a22-operator-scripts\") pod \"nova-cell1-db-create-l2wlj\" (UID: \"636dddde-3987-4843-917d-956aedd66a22\") " pod="openstack/nova-cell1-db-create-l2wlj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.350924 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a5d45a0-51a8-4925-bbba-2b9d42ac9114-operator-scripts\") pod \"nova-api-f64f-account-create-update-qtqz4\" (UID: \"4a5d45a0-51a8-4925-bbba-2b9d42ac9114\") " pod="openstack/nova-api-f64f-account-create-update-qtqz4" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.350959 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqngz\" (UniqueName: \"kubernetes.io/projected/4a5d45a0-51a8-4925-bbba-2b9d42ac9114-kube-api-access-jqngz\") pod \"nova-api-f64f-account-create-update-qtqz4\" (UID: \"4a5d45a0-51a8-4925-bbba-2b9d42ac9114\") " pod="openstack/nova-api-f64f-account-create-update-qtqz4" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.351004 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2qrt\" (UniqueName: \"kubernetes.io/projected/636dddde-3987-4843-917d-956aedd66a22-kube-api-access-p2qrt\") pod \"nova-cell1-db-create-l2wlj\" (UID: \"636dddde-3987-4843-917d-956aedd66a22\") " pod="openstack/nova-cell1-db-create-l2wlj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.351763 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a5d45a0-51a8-4925-bbba-2b9d42ac9114-operator-scripts\") pod \"nova-api-f64f-account-create-update-qtqz4\" (UID: \"4a5d45a0-51a8-4925-bbba-2b9d42ac9114\") " pod="openstack/nova-api-f64f-account-create-update-qtqz4" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.377773 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqngz\" (UniqueName: \"kubernetes.io/projected/4a5d45a0-51a8-4925-bbba-2b9d42ac9114-kube-api-access-jqngz\") pod \"nova-api-f64f-account-create-update-qtqz4\" (UID: \"4a5d45a0-51a8-4925-bbba-2b9d42ac9114\") " pod="openstack/nova-api-f64f-account-create-update-qtqz4" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.452464 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2qrt\" (UniqueName: \"kubernetes.io/projected/636dddde-3987-4843-917d-956aedd66a22-kube-api-access-p2qrt\") pod \"nova-cell1-db-create-l2wlj\" (UID: \"636dddde-3987-4843-917d-956aedd66a22\") " pod="openstack/nova-cell1-db-create-l2wlj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.452558 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/253e3b48-23a7-4433-9f12-ae9901a3260d-operator-scripts\") pod \"nova-cell0-321f-account-create-update-sgsmd\" (UID: \"253e3b48-23a7-4433-9f12-ae9901a3260d\") " 
pod="openstack/nova-cell0-321f-account-create-update-sgsmd" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.452589 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhbq4\" (UniqueName: \"kubernetes.io/projected/253e3b48-23a7-4433-9f12-ae9901a3260d-kube-api-access-hhbq4\") pod \"nova-cell0-321f-account-create-update-sgsmd\" (UID: \"253e3b48-23a7-4433-9f12-ae9901a3260d\") " pod="openstack/nova-cell0-321f-account-create-update-sgsmd" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.452672 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/636dddde-3987-4843-917d-956aedd66a22-operator-scripts\") pod \"nova-cell1-db-create-l2wlj\" (UID: \"636dddde-3987-4843-917d-956aedd66a22\") " pod="openstack/nova-cell1-db-create-l2wlj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.453733 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/636dddde-3987-4843-917d-956aedd66a22-operator-scripts\") pod \"nova-cell1-db-create-l2wlj\" (UID: \"636dddde-3987-4843-917d-956aedd66a22\") " pod="openstack/nova-cell1-db-create-l2wlj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.462184 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f64f-account-create-update-qtqz4" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.471950 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2qrt\" (UniqueName: \"kubernetes.io/projected/636dddde-3987-4843-917d-956aedd66a22-kube-api-access-p2qrt\") pod \"nova-cell1-db-create-l2wlj\" (UID: \"636dddde-3987-4843-917d-956aedd66a22\") " pod="openstack/nova-cell1-db-create-l2wlj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.540512 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-c589-account-create-update-c99cj"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.541943 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-c589-account-create-update-c99cj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.544589 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.549906 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-c589-account-create-update-c99cj"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.554180 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/253e3b48-23a7-4433-9f12-ae9901a3260d-operator-scripts\") pod \"nova-cell0-321f-account-create-update-sgsmd\" (UID: \"253e3b48-23a7-4433-9f12-ae9901a3260d\") " pod="openstack/nova-cell0-321f-account-create-update-sgsmd" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.554237 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhbq4\" (UniqueName: \"kubernetes.io/projected/253e3b48-23a7-4433-9f12-ae9901a3260d-kube-api-access-hhbq4\") pod \"nova-cell0-321f-account-create-update-sgsmd\" (UID: \"253e3b48-23a7-4433-9f12-ae9901a3260d\") " pod="openstack/nova-cell0-321f-account-create-update-sgsmd" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.555165 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/253e3b48-23a7-4433-9f12-ae9901a3260d-operator-scripts\") pod \"nova-cell0-321f-account-create-update-sgsmd\" (UID: \"253e3b48-23a7-4433-9f12-ae9901a3260d\") " pod="openstack/nova-cell0-321f-account-create-update-sgsmd" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.565922 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-l2wlj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.575867 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhbq4\" (UniqueName: \"kubernetes.io/projected/253e3b48-23a7-4433-9f12-ae9901a3260d-kube-api-access-hhbq4\") pod \"nova-cell0-321f-account-create-update-sgsmd\" (UID: \"253e3b48-23a7-4433-9f12-ae9901a3260d\") " pod="openstack/nova-cell0-321f-account-create-update-sgsmd" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.656718 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbf4c134-f53b-4613-bc36-92d89f55f8be-operator-scripts\") pod \"nova-cell1-c589-account-create-update-c99cj\" (UID: \"bbf4c134-f53b-4613-bc36-92d89f55f8be\") " pod="openstack/nova-cell1-c589-account-create-update-c99cj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.656802 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7dr4\" (UniqueName: \"kubernetes.io/projected/bbf4c134-f53b-4613-bc36-92d89f55f8be-kube-api-access-f7dr4\") pod \"nova-cell1-c589-account-create-update-c99cj\" (UID: \"bbf4c134-f53b-4613-bc36-92d89f55f8be\") " pod="openstack/nova-cell1-c589-account-create-update-c99cj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.718402 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-8c56c4785-52p9j"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.719732 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-8c56c4785-52p9j" podUID="9ea81429-d4fc-42e1-b626-dfa345c7f698" containerName="heat-api" containerID="cri-o://d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602" gracePeriod=60 Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.729270 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-321f-account-create-update-sgsmd" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.750207 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-8c56c4785-52p9j" podUID="9ea81429-d4fc-42e1-b626-dfa345c7f698" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.194:8004/healthcheck\": EOF" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.762862 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-79b9f4dd9f-25vz6"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.763545 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" podUID="6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f" containerName="heat-cfnapi" containerID="cri-o://a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707" gracePeriod=60 Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.787750 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" podUID="6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.193:8000/healthcheck\": EOF" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.790503 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbf4c134-f53b-4613-bc36-92d89f55f8be-operator-scripts\") pod \"nova-cell1-c589-account-create-update-c99cj\" (UID: \"bbf4c134-f53b-4613-bc36-92d89f55f8be\") " pod="openstack/nova-cell1-c589-account-create-update-c99cj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.793214 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbf4c134-f53b-4613-bc36-92d89f55f8be-operator-scripts\") pod \"nova-cell1-c589-account-create-update-c99cj\" (UID: \"bbf4c134-f53b-4613-bc36-92d89f55f8be\") " pod="openstack/nova-cell1-c589-account-create-update-c99cj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.806229 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7dr4\" (UniqueName: \"kubernetes.io/projected/bbf4c134-f53b-4613-bc36-92d89f55f8be-kube-api-access-f7dr4\") pod \"nova-cell1-c589-account-create-update-c99cj\" (UID: \"bbf4c134-f53b-4613-bc36-92d89f55f8be\") " pod="openstack/nova-cell1-c589-account-create-update-c99cj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.809897 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-56f5c7d86c-hthhq"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.815498 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.823946 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.824020 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.849150 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-56f5c7d86c-hthhq"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.865764 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-c847d79b6-kwhvk"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.867357 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.871425 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.871868 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.872223 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7dr4\" (UniqueName: \"kubernetes.io/projected/bbf4c134-f53b-4613-bc36-92d89f55f8be-kube-api-access-f7dr4\") pod \"nova-cell1-c589-account-create-update-c99cj\" (UID: \"bbf4c134-f53b-4613-bc36-92d89f55f8be\") " pod="openstack/nova-cell1-c589-account-create-update-c99cj" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.885218 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-c847d79b6-kwhvk"] Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.910084 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-config-data-custom\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.910165 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7wkk\" (UniqueName: \"kubernetes.io/projected/644adf64-d8e7-40ff-8075-2f9595faa559-kube-api-access-s7wkk\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.910198 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-combined-ca-bundle\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.910317 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-config-data\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.910498 4884 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-internal-tls-certs\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.910607 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-internal-tls-certs\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.910639 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-public-tls-certs\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.910659 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-config-data\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.910686 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-config-data-custom\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.910713 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wsxm\" (UniqueName: \"kubernetes.io/projected/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-kube-api-access-8wsxm\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.910739 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-public-tls-certs\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:23 crc kubenswrapper[4884]: I1210 00:56:23.910853 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-combined-ca-bundle\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.012948 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-config-data-custom\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " 
pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.013018 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7wkk\" (UniqueName: \"kubernetes.io/projected/644adf64-d8e7-40ff-8075-2f9595faa559-kube-api-access-s7wkk\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.013057 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-combined-ca-bundle\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.013094 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-config-data\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.013157 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-internal-tls-certs\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.013233 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-internal-tls-certs\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.013421 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-public-tls-certs\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.013815 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-config-data\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.013849 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-config-data-custom\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.013876 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wsxm\" (UniqueName: \"kubernetes.io/projected/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-kube-api-access-8wsxm\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 
10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.013901 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-public-tls-certs\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.013934 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-combined-ca-bundle\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.022186 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-public-tls-certs\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.035481 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-config-data\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.040873 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wsxm\" (UniqueName: \"kubernetes.io/projected/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-kube-api-access-8wsxm\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.041841 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7wkk\" (UniqueName: \"kubernetes.io/projected/644adf64-d8e7-40ff-8075-2f9595faa559-kube-api-access-s7wkk\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.050611 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-combined-ca-bundle\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.050688 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-public-tls-certs\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.051195 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-internal-tls-certs\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.052732 4884 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-combined-ca-bundle\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.052786 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-config-data\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.053816 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9-config-data-custom\") pod \"heat-api-c847d79b6-kwhvk\" (UID: \"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9\") " pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.055153 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-internal-tls-certs\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.058161 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/644adf64-d8e7-40ff-8075-2f9595faa559-config-data-custom\") pod \"heat-cfnapi-56f5c7d86c-hthhq\" (UID: \"644adf64-d8e7-40ff-8075-2f9595faa559\") " pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.169849 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-c589-account-create-update-c99cj" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.207214 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:24 crc kubenswrapper[4884]: I1210 00:56:24.217530 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:25 crc kubenswrapper[4884]: I1210 00:56:25.232914 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" Dec 10 00:56:25 crc kubenswrapper[4884]: I1210 00:56:25.309316 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-5mctc"] Dec 10 00:56:25 crc kubenswrapper[4884]: I1210 00:56:25.309600 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-5mctc" podUID="2cecdb9b-c23c-435f-9099-c8ec4118da3a" containerName="dnsmasq-dns" containerID="cri-o://6f80a30d0ec7d5b050ed42f272509638d01297da42326f6319663958d7235830" gracePeriod=10 Dec 10 00:56:26 crc kubenswrapper[4884]: I1210 00:56:26.009493 4884 generic.go:334] "Generic (PLEG): container finished" podID="2cecdb9b-c23c-435f-9099-c8ec4118da3a" containerID="6f80a30d0ec7d5b050ed42f272509638d01297da42326f6319663958d7235830" exitCode=0 Dec 10 00:56:26 crc kubenswrapper[4884]: I1210 00:56:26.009533 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-5mctc" event={"ID":"2cecdb9b-c23c-435f-9099-c8ec4118da3a","Type":"ContainerDied","Data":"6f80a30d0ec7d5b050ed42f272509638d01297da42326f6319663958d7235830"} Dec 10 00:56:26 crc kubenswrapper[4884]: I1210 00:56:26.876205 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6578955fd5-5mctc" podUID="2cecdb9b-c23c-435f-9099-c8ec4118da3a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.185:5353: connect: connection refused" Dec 10 00:56:27 crc kubenswrapper[4884]: I1210 00:56:27.316108 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:27 crc kubenswrapper[4884]: I1210 00:56:27.316150 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-8567bf5cf5-sh595" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.435376 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.511410 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-combined-ca-bundle\") pod \"341036d7-5d9f-493a-b043-11d6517c390d\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.512060 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-scripts\") pod \"341036d7-5d9f-493a-b043-11d6517c390d\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.512123 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/341036d7-5d9f-493a-b043-11d6517c390d-run-httpd\") pod \"341036d7-5d9f-493a-b043-11d6517c390d\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.512198 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnvqb\" (UniqueName: \"kubernetes.io/projected/341036d7-5d9f-493a-b043-11d6517c390d-kube-api-access-tnvqb\") pod \"341036d7-5d9f-493a-b043-11d6517c390d\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.512245 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-config-data\") pod \"341036d7-5d9f-493a-b043-11d6517c390d\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.527784 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/341036d7-5d9f-493a-b043-11d6517c390d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "341036d7-5d9f-493a-b043-11d6517c390d" (UID: "341036d7-5d9f-493a-b043-11d6517c390d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.536422 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/341036d7-5d9f-493a-b043-11d6517c390d-kube-api-access-tnvqb" (OuterVolumeSpecName: "kube-api-access-tnvqb") pod "341036d7-5d9f-493a-b043-11d6517c390d" (UID: "341036d7-5d9f-493a-b043-11d6517c390d"). InnerVolumeSpecName "kube-api-access-tnvqb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.551665 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-scripts" (OuterVolumeSpecName: "scripts") pod "341036d7-5d9f-493a-b043-11d6517c390d" (UID: "341036d7-5d9f-493a-b043-11d6517c390d"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.614168 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/341036d7-5d9f-493a-b043-11d6517c390d-log-httpd\") pod \"341036d7-5d9f-493a-b043-11d6517c390d\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.614313 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-sg-core-conf-yaml\") pod \"341036d7-5d9f-493a-b043-11d6517c390d\" (UID: \"341036d7-5d9f-493a-b043-11d6517c390d\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.614804 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.614819 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/341036d7-5d9f-493a-b043-11d6517c390d-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.614828 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnvqb\" (UniqueName: \"kubernetes.io/projected/341036d7-5d9f-493a-b043-11d6517c390d-kube-api-access-tnvqb\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.617071 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/341036d7-5d9f-493a-b043-11d6517c390d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "341036d7-5d9f-493a-b043-11d6517c390d" (UID: "341036d7-5d9f-493a-b043-11d6517c390d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.620885 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.673364 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-config-data" (OuterVolumeSpecName: "config-data") pod "341036d7-5d9f-493a-b043-11d6517c390d" (UID: "341036d7-5d9f-493a-b043-11d6517c390d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.689404 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "341036d7-5d9f-493a-b043-11d6517c390d" (UID: "341036d7-5d9f-493a-b043-11d6517c390d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.715779 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgpt4\" (UniqueName: \"kubernetes.io/projected/2cecdb9b-c23c-435f-9099-c8ec4118da3a-kube-api-access-bgpt4\") pod \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.715963 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-ovsdbserver-sb\") pod \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.715991 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-swift-storage-0\") pod \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.716021 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-config\") pod \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.716243 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-svc\") pod \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.716369 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-ovsdbserver-nb\") pod \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\" (UID: \"2cecdb9b-c23c-435f-9099-c8ec4118da3a\") " Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.717313 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.717334 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/341036d7-5d9f-493a-b043-11d6517c390d-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.717343 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.725616 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2cecdb9b-c23c-435f-9099-c8ec4118da3a-kube-api-access-bgpt4" (OuterVolumeSpecName: "kube-api-access-bgpt4") pod "2cecdb9b-c23c-435f-9099-c8ec4118da3a" (UID: "2cecdb9b-c23c-435f-9099-c8ec4118da3a"). InnerVolumeSpecName "kube-api-access-bgpt4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.731501 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "341036d7-5d9f-493a-b043-11d6517c390d" (UID: "341036d7-5d9f-493a-b043-11d6517c390d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.779878 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2cecdb9b-c23c-435f-9099-c8ec4118da3a" (UID: "2cecdb9b-c23c-435f-9099-c8ec4118da3a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.789062 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2cecdb9b-c23c-435f-9099-c8ec4118da3a" (UID: "2cecdb9b-c23c-435f-9099-c8ec4118da3a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.793960 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2cecdb9b-c23c-435f-9099-c8ec4118da3a" (UID: "2cecdb9b-c23c-435f-9099-c8ec4118da3a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.796922 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2cecdb9b-c23c-435f-9099-c8ec4118da3a" (UID: "2cecdb9b-c23c-435f-9099-c8ec4118da3a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.805855 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-config" (OuterVolumeSpecName: "config") pod "2cecdb9b-c23c-435f-9099-c8ec4118da3a" (UID: "2cecdb9b-c23c-435f-9099-c8ec4118da3a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.819208 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.819327 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.819385 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.819474 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.819538 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgpt4\" (UniqueName: \"kubernetes.io/projected/2cecdb9b-c23c-435f-9099-c8ec4118da3a-kube-api-access-bgpt4\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.819594 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/341036d7-5d9f-493a-b043-11d6517c390d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:28 crc kubenswrapper[4884]: I1210 00:56:28.819654 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2cecdb9b-c23c-435f-9099-c8ec4118da3a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.054818 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"341036d7-5d9f-493a-b043-11d6517c390d","Type":"ContainerDied","Data":"c0783c4de94b5a22879e3effd09a5bb4244bd392c93af4b757cd87bdf34a5e19"} Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.054865 4884 scope.go:117] "RemoveContainer" containerID="746f6a5a4a296ac79c01aac0deb35cb5815770a4f591e7dd9653c67b78d3ed6d" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.054915 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.062694 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-321f-account-create-update-sgsmd"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.075169 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-5mctc" event={"ID":"2cecdb9b-c23c-435f-9099-c8ec4118da3a","Type":"ContainerDied","Data":"f913d239da205d0f2fd67d22439f8b75452802a37816972b4319d3d22bb92c94"} Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.075281 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-5mctc" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.077951 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.081952 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"b215a749-e579-4854-a2f7-ebaf6c3416a8","Type":"ContainerStarted","Data":"74d8190454b392c0f6cb32e6952b466b198d2c622fb6b4240f50577090804376"} Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.110701 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.05634458 podStartE2EDuration="18.110680405s" podCreationTimestamp="2025-12-10 00:56:11 +0000 UTC" firstStartedPulling="2025-12-10 00:56:12.110742692 +0000 UTC m=+1545.188699809" lastFinishedPulling="2025-12-10 00:56:28.165078517 +0000 UTC m=+1561.243035634" observedRunningTime="2025-12-10 00:56:29.096738977 +0000 UTC m=+1562.174696094" watchObservedRunningTime="2025-12-10 00:56:29.110680405 +0000 UTC m=+1562.188637522" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.145183 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-api-8c56c4785-52p9j" podUID="9ea81429-d4fc-42e1-b626-dfa345c7f698" containerName="heat-api" probeResult="failure" output="Get \"http://10.217.0.194:8004/healthcheck\": read tcp 10.217.0.2:42582->10.217.0.194:8004: read: connection reset by peer" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.154298 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-5mctc"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.165800 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-5mctc"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.169322 4884 scope.go:117] "RemoveContainer" containerID="12a903484f96f6a787ada5646f4c6141f58587a1f62c000387897987f9abaa29" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.204595 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" podUID="6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f" containerName="heat-cfnapi" probeResult="failure" output="Get \"http://10.217.0.193:8000/healthcheck\": read tcp 10.217.0.2:44196->10.217.0.193:8000: read: connection reset by peer" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.312349 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2cecdb9b-c23c-435f-9099-c8ec4118da3a" path="/var/lib/kubelet/pods/2cecdb9b-c23c-435f-9099-c8ec4118da3a/volumes" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.313930 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.326309 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.342001 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:56:29 crc kubenswrapper[4884]: E1210 00:56:29.342426 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="341036d7-5d9f-493a-b043-11d6517c390d" containerName="ceilometer-notification-agent" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.342458 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="341036d7-5d9f-493a-b043-11d6517c390d" containerName="ceilometer-notification-agent" 
Dec 10 00:56:29 crc kubenswrapper[4884]: E1210 00:56:29.342469 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="341036d7-5d9f-493a-b043-11d6517c390d" containerName="sg-core" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.342475 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="341036d7-5d9f-493a-b043-11d6517c390d" containerName="sg-core" Dec 10 00:56:29 crc kubenswrapper[4884]: E1210 00:56:29.342487 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cecdb9b-c23c-435f-9099-c8ec4118da3a" containerName="init" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.342493 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cecdb9b-c23c-435f-9099-c8ec4118da3a" containerName="init" Dec 10 00:56:29 crc kubenswrapper[4884]: E1210 00:56:29.342514 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2cecdb9b-c23c-435f-9099-c8ec4118da3a" containerName="dnsmasq-dns" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.342520 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2cecdb9b-c23c-435f-9099-c8ec4118da3a" containerName="dnsmasq-dns" Dec 10 00:56:29 crc kubenswrapper[4884]: E1210 00:56:29.342530 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="341036d7-5d9f-493a-b043-11d6517c390d" containerName="proxy-httpd" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.342537 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="341036d7-5d9f-493a-b043-11d6517c390d" containerName="proxy-httpd" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.342751 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="341036d7-5d9f-493a-b043-11d6517c390d" containerName="proxy-httpd" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.342762 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="341036d7-5d9f-493a-b043-11d6517c390d" containerName="ceilometer-notification-agent" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.342775 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2cecdb9b-c23c-435f-9099-c8ec4118da3a" containerName="dnsmasq-dns" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.342793 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="341036d7-5d9f-493a-b043-11d6517c390d" containerName="sg-core" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.354728 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.354823 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.356633 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.358014 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.445587 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.445737 4884 scope.go:117] "RemoveContainer" containerID="da117cab1679c3c345de0912eb6f653eafe1b21c86828fb9318b71dd52095381" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.446069 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-scripts\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.446147 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.446269 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cd2m8\" (UniqueName: \"kubernetes.io/projected/ffc65c2a-883e-4939-8685-a0d77fd1c717-kube-api-access-cd2m8\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.446313 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-config-data\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.446373 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffc65c2a-883e-4939-8685-a0d77fd1c717-log-httpd\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.446416 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffc65c2a-883e-4939-8685-a0d77fd1c717-run-httpd\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.493077 4884 scope.go:117] "RemoveContainer" containerID="6f80a30d0ec7d5b050ed42f272509638d01297da42326f6319663958d7235830" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.520955 4884 scope.go:117] "RemoveContainer" containerID="0acc21e6e09796daee597f3b8b9fa892b042c0ab08d6abafdf6f71b59ccb0d57" Dec 10 00:56:29 crc 
kubenswrapper[4884]: W1210 00:56:29.538929 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod636dddde_3987_4843_917d_956aedd66a22.slice/crio-fdd2b39b84a0f4f66398498270c78c957a7df7c0d510dfca3f97238e24ed22e0 WatchSource:0}: Error finding container fdd2b39b84a0f4f66398498270c78c957a7df7c0d510dfca3f97238e24ed22e0: Status 404 returned error can't find the container with id fdd2b39b84a0f4f66398498270c78c957a7df7c0d510dfca3f97238e24ed22e0 Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.550190 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-l2wlj"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.551100 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-scripts\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.551174 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.551272 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cd2m8\" (UniqueName: \"kubernetes.io/projected/ffc65c2a-883e-4939-8685-a0d77fd1c717-kube-api-access-cd2m8\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.551296 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-config-data\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.551352 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffc65c2a-883e-4939-8685-a0d77fd1c717-log-httpd\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.551383 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffc65c2a-883e-4939-8685-a0d77fd1c717-run-httpd\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.551453 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.554526 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffc65c2a-883e-4939-8685-a0d77fd1c717-log-httpd\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 
00:56:29.555484 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffc65c2a-883e-4939-8685-a0d77fd1c717-run-httpd\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.556980 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-scripts\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.560349 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-config-data\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.562359 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.566234 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cd2m8\" (UniqueName: \"kubernetes.io/projected/ffc65c2a-883e-4939-8685-a0d77fd1c717-kube-api-access-cd2m8\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.569063 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: W1210 00:56:29.584952 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod73959270_9ff8_499d_8316_dd45b701ad6f.slice/crio-c9aeae66de7cbdb697579c61388ddf2ef9b3f5c128142a53b65a142e3382bcb6 WatchSource:0}: Error finding container c9aeae66de7cbdb697579c61388ddf2ef9b3f5c128142a53b65a142e3382bcb6: Status 404 returned error can't find the container with id c9aeae66de7cbdb697579c61388ddf2ef9b3f5c128142a53b65a142e3382bcb6 Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.588391 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-mgp4f"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.612017 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-56f5c7d86c-hthhq"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.629502 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-566fc6dbdb-qb9bs"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.642491 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-6c876d64f-4c6rh"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.648624 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-c847d79b6-kwhvk"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.785214 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.827277 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.849754 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.958867 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-config-data\") pod \"9ea81429-d4fc-42e1-b626-dfa345c7f698\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.958906 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-combined-ca-bundle\") pod \"9ea81429-d4fc-42e1-b626-dfa345c7f698\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.960208 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5d9jk\" (UniqueName: \"kubernetes.io/projected/9ea81429-d4fc-42e1-b626-dfa345c7f698-kube-api-access-5d9jk\") pod \"9ea81429-d4fc-42e1-b626-dfa345c7f698\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.960333 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-config-data-custom\") pod \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.960374 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-config-data-custom\") pod \"9ea81429-d4fc-42e1-b626-dfa345c7f698\" (UID: \"9ea81429-d4fc-42e1-b626-dfa345c7f698\") " Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.976888 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-combined-ca-bundle\") pod \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.976920 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz26l\" (UniqueName: \"kubernetes.io/projected/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-kube-api-access-lz26l\") pod \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.976946 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-config-data\") pod \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\" (UID: \"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f\") " Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.979894 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ea81429-d4fc-42e1-b626-dfa345c7f698-kube-api-access-5d9jk" (OuterVolumeSpecName: 
"kube-api-access-5d9jk") pod "9ea81429-d4fc-42e1-b626-dfa345c7f698" (UID: "9ea81429-d4fc-42e1-b626-dfa345c7f698"). InnerVolumeSpecName "kube-api-access-5d9jk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.961961 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-c589-account-create-update-c99cj"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.980067 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-dbzrq"] Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.988825 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9ea81429-d4fc-42e1-b626-dfa345c7f698" (UID: "9ea81429-d4fc-42e1-b626-dfa345c7f698"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:29 crc kubenswrapper[4884]: I1210 00:56:29.995596 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-5867565df6-2bb6n"] Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.000415 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f" (UID: "6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.010163 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fzs89"] Dec 10 00:56:30 crc kubenswrapper[4884]: E1210 00:56:30.010684 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ea81429-d4fc-42e1-b626-dfa345c7f698" containerName="heat-api" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.010702 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ea81429-d4fc-42e1-b626-dfa345c7f698" containerName="heat-api" Dec 10 00:56:30 crc kubenswrapper[4884]: E1210 00:56:30.010724 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f" containerName="heat-cfnapi" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.010732 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f" containerName="heat-cfnapi" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.010925 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ea81429-d4fc-42e1-b626-dfa345c7f698" containerName="heat-api" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.010952 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f" containerName="heat-cfnapi" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.012887 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:30 crc kubenswrapper[4884]: W1210 00:56:30.014263 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod589b62f4_a91b_43b0_b49b_b303dcba8a67.slice/crio-93ebeb68d09c84454c9874bc8eb1e771d52af1bbf7745215019b84f253b8833f WatchSource:0}: Error finding container 93ebeb68d09c84454c9874bc8eb1e771d52af1bbf7745215019b84f253b8833f: Status 404 returned error can't find the container with id 93ebeb68d09c84454c9874bc8eb1e771d52af1bbf7745215019b84f253b8833f Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.021575 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fzs89"] Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.033005 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-kube-api-access-lz26l" (OuterVolumeSpecName: "kube-api-access-lz26l") pod "6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f" (UID: "6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f"). InnerVolumeSpecName "kube-api-access-lz26l". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.036372 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f64f-account-create-update-qtqz4"] Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.048402 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.103356 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4518e9df-bc84-45c9-ae55-3af2cff56a19-catalog-content\") pod \"certified-operators-fzs89\" (UID: \"4518e9df-bc84-45c9-ae55-3af2cff56a19\") " pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.103695 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4518e9df-bc84-45c9-ae55-3af2cff56a19-utilities\") pod \"certified-operators-fzs89\" (UID: \"4518e9df-bc84-45c9-ae55-3af2cff56a19\") " pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.103820 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tk6xf\" (UniqueName: \"kubernetes.io/projected/4518e9df-bc84-45c9-ae55-3af2cff56a19-kube-api-access-tk6xf\") pod \"certified-operators-fzs89\" (UID: \"4518e9df-bc84-45c9-ae55-3af2cff56a19\") " pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.104857 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5d9jk\" (UniqueName: \"kubernetes.io/projected/9ea81429-d4fc-42e1-b626-dfa345c7f698-kube-api-access-5d9jk\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.104878 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.104889 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.104903 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz26l\" (UniqueName: \"kubernetes.io/projected/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-kube-api-access-lz26l\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:30 crc kubenswrapper[4884]: W1210 00:56:30.126939 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a5d45a0_51a8_4925_bbba_2b9d42ac9114.slice/crio-bdd473f8bd0441b20f04e9249aa99a41fb0850852f85e17fa80af369ddbcb3f7 WatchSource:0}: Error finding container bdd473f8bd0441b20f04e9249aa99a41fb0850852f85e17fa80af369ddbcb3f7: Status 404 returned error can't find the container with id bdd473f8bd0441b20f04e9249aa99a41fb0850852f85e17fa80af369ddbcb3f7 Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.150861 4884 generic.go:334] "Generic (PLEG): container finished" podID="6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f" containerID="a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707" exitCode=0 Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.150937 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" event={"ID":"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f","Type":"ContainerDied","Data":"a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.150968 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" event={"ID":"6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f","Type":"ContainerDied","Data":"b082e5cf709dc39dba6db9fde4b652e7730968ca0878856bd3029758b6e56ec5"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.150985 4884 scope.go:117] "RemoveContainer" containerID="a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.151084 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-79b9f4dd9f-25vz6" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.161895 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9ea81429-d4fc-42e1-b626-dfa345c7f698" (UID: "9ea81429-d4fc-42e1-b626-dfa345c7f698"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.163558 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f" (UID: "6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.205791 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.206386 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tk6xf\" (UniqueName: \"kubernetes.io/projected/4518e9df-bc84-45c9-ae55-3af2cff56a19-kube-api-access-tk6xf\") pod \"certified-operators-fzs89\" (UID: \"4518e9df-bc84-45c9-ae55-3af2cff56a19\") " pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.206467 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4518e9df-bc84-45c9-ae55-3af2cff56a19-catalog-content\") pod \"certified-operators-fzs89\" (UID: \"4518e9df-bc84-45c9-ae55-3af2cff56a19\") " pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.206588 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4518e9df-bc84-45c9-ae55-3af2cff56a19-utilities\") pod \"certified-operators-fzs89\" (UID: \"4518e9df-bc84-45c9-ae55-3af2cff56a19\") " pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.206683 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.206695 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.207209 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4518e9df-bc84-45c9-ae55-3af2cff56a19-utilities\") pod \"certified-operators-fzs89\" (UID: \"4518e9df-bc84-45c9-ae55-3af2cff56a19\") " pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.207686 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4518e9df-bc84-45c9-ae55-3af2cff56a19-catalog-content\") pod \"certified-operators-fzs89\" (UID: \"4518e9df-bc84-45c9-ae55-3af2cff56a19\") " pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.209324 4884 generic.go:334] "Generic (PLEG): container finished" podID="253e3b48-23a7-4433-9f12-ae9901a3260d" containerID="344d6683a1b9acb15fe9610d22ee9a38f0a4b65a8891b1193b1ae74d3fd3c617" exitCode=0 Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.209461 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-321f-account-create-update-sgsmd" event={"ID":"253e3b48-23a7-4433-9f12-ae9901a3260d","Type":"ContainerDied","Data":"344d6683a1b9acb15fe9610d22ee9a38f0a4b65a8891b1193b1ae74d3fd3c617"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.209534 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-321f-account-create-update-sgsmd" 
event={"ID":"253e3b48-23a7-4433-9f12-ae9901a3260d","Type":"ContainerStarted","Data":"675c72df13402d19b084abc2e9133a2beec115d9ace64a8dbdbde55f26d15905"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.213701 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-config-data" (OuterVolumeSpecName: "config-data") pod "9ea81429-d4fc-42e1-b626-dfa345c7f698" (UID: "9ea81429-d4fc-42e1-b626-dfa345c7f698"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.222164 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" event={"ID":"73959270-9ff8-499d-8316-dd45b701ad6f","Type":"ContainerStarted","Data":"c9aeae66de7cbdb697579c61388ddf2ef9b3f5c128142a53b65a142e3382bcb6"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.226686 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-config-data" (OuterVolumeSpecName: "config-data") pod "6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f" (UID: "6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.236733 4884 generic.go:334] "Generic (PLEG): container finished" podID="9ea81429-d4fc-42e1-b626-dfa345c7f698" containerID="d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602" exitCode=0 Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.236828 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-8c56c4785-52p9j" event={"ID":"9ea81429-d4fc-42e1-b626-dfa345c7f698","Type":"ContainerDied","Data":"d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.236855 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-8c56c4785-52p9j" event={"ID":"9ea81429-d4fc-42e1-b626-dfa345c7f698","Type":"ContainerDied","Data":"2dad861742e8cfc70e682f33a138f0831ab59f0ac6507f7cac4bfe9e9175b104"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.236913 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-8c56c4785-52p9j" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.240363 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tk6xf\" (UniqueName: \"kubernetes.io/projected/4518e9df-bc84-45c9-ae55-3af2cff56a19-kube-api-access-tk6xf\") pod \"certified-operators-fzs89\" (UID: \"4518e9df-bc84-45c9-ae55-3af2cff56a19\") " pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.245093 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5867565df6-2bb6n" event={"ID":"9baa9257-9a08-427f-bdec-1fa28a81303f","Type":"ContainerStarted","Data":"c8d87fcda98a938f28cbc723669b880201a75fadf14823e8b5c8fa6076c8e8ea"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.251543 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-c589-account-create-update-c99cj" event={"ID":"bbf4c134-f53b-4613-bc36-92d89f55f8be","Type":"ContainerStarted","Data":"b3d10f490f060ee20ce474dae1ae0da53ed0ad0e7fc4231c7227e34e966846c5"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.265276 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-l2wlj" event={"ID":"636dddde-3987-4843-917d-956aedd66a22","Type":"ContainerStarted","Data":"5a18b496c72d632255aaa36434d4eea0fa18107f47c02e03dd150602063028eb"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.265337 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-l2wlj" event={"ID":"636dddde-3987-4843-917d-956aedd66a22","Type":"ContainerStarted","Data":"fdd2b39b84a0f4f66398498270c78c957a7df7c0d510dfca3f97238e24ed22e0"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.275592 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-mgp4f" event={"ID":"16359c7a-73d8-4c9b-bb45-2c81c5475330","Type":"ContainerStarted","Data":"6df2bf5008a1d4aa4acaf0f1555f04911e13454b0a3cbf987740d06347795089"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.277551 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c876d64f-4c6rh" event={"ID":"c15434d2-166c-4919-ac7f-99ef8bf909b4","Type":"ContainerStarted","Data":"bb26c8e636881a4430200dfae2f9bd97dca169ccf5ffd43097998dd9b2789126"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.282750 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-c847d79b6-kwhvk" event={"ID":"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9","Type":"ContainerStarted","Data":"1c77b650f3200c263f4667064c429b57b4618a494eb536124425c43d2a87e588"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.310689 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.315966 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ea81429-d4fc-42e1-b626-dfa345c7f698-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.312700 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" event={"ID":"644adf64-d8e7-40ff-8075-2f9595faa559","Type":"ContainerStarted","Data":"0f9a7e51f4cb5f2380467bd8f623792e25c24ef307eb0c8ab3415ff49205a14a"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 
00:56:30.318663 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-dbzrq" event={"ID":"589b62f4-a91b-43b0-b49b-b303dcba8a67","Type":"ContainerStarted","Data":"93ebeb68d09c84454c9874bc8eb1e771d52af1bbf7745215019b84f253b8833f"} Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.376035 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-l2wlj" podStartSLOduration=7.376014061 podStartE2EDuration="7.376014061s" podCreationTimestamp="2025-12-10 00:56:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:30.284659347 +0000 UTC m=+1563.362616464" watchObservedRunningTime="2025-12-10 00:56:30.376014061 +0000 UTC m=+1563.453971178" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.599588 4884 scope.go:117] "RemoveContainer" containerID="a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707" Dec 10 00:56:30 crc kubenswrapper[4884]: E1210 00:56:30.610915 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707\": container with ID starting with a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707 not found: ID does not exist" containerID="a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.610974 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707"} err="failed to get container status \"a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707\": rpc error: code = NotFound desc = could not find container \"a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707\": container with ID starting with a6cdd0a25a2c6e4fcb93df87c2fde79b7dd78743c2792ddb1b99bb6ea3fdb707 not found: ID does not exist" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.611002 4884 scope.go:117] "RemoveContainer" containerID="d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.622485 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.674291 4884 scope.go:117] "RemoveContainer" containerID="d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602" Dec 10 00:56:30 crc kubenswrapper[4884]: E1210 00:56:30.684494 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602\": container with ID starting with d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602 not found: ID does not exist" containerID="d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.684550 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602"} err="failed to get container status \"d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602\": rpc error: code = NotFound desc = could not find container \"d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602\": container with ID starting with 
d2102217315aed94a56d5a94f0bda176c97030b4db98a27bef847bce9a9fb602 not found: ID does not exist" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.774303 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.817423 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-8c56c4785-52p9j"] Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.842541 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-8c56c4785-52p9j"] Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.860578 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-79b9f4dd9f-25vz6"] Dec 10 00:56:30 crc kubenswrapper[4884]: I1210 00:56:30.874102 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-79b9f4dd9f-25vz6"] Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.304309 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="341036d7-5d9f-493a-b043-11d6517c390d" path="/var/lib/kubelet/pods/341036d7-5d9f-493a-b043-11d6517c390d/volumes" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.305496 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f" path="/var/lib/kubelet/pods/6e3af00d-1ca3-4a2a-b6bc-7a8560ccf28f/volumes" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.306045 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ea81429-d4fc-42e1-b626-dfa345c7f698" path="/var/lib/kubelet/pods/9ea81429-d4fc-42e1-b626-dfa345c7f698/volumes" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.356197 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-dbzrq" event={"ID":"589b62f4-a91b-43b0-b49b-b303dcba8a67","Type":"ContainerStarted","Data":"151889c10087002c761da4afeefc56fafbf5a2b7e1f1449d485d869860e106a4"} Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.370877 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5867565df6-2bb6n" event={"ID":"9baa9257-9a08-427f-bdec-1fa28a81303f","Type":"ContainerStarted","Data":"8c6bef2d475c878cd33e3856ad8656debe5d60deda7809998e24cae73d581c21"} Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.372082 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.379053 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-dbzrq" podStartSLOduration=8.379033874 podStartE2EDuration="8.379033874s" podCreationTimestamp="2025-12-10 00:56:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:31.372207938 +0000 UTC m=+1564.450165055" watchObservedRunningTime="2025-12-10 00:56:31.379033874 +0000 UTC m=+1564.456990991" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.396705 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffc65c2a-883e-4939-8685-a0d77fd1c717","Type":"ContainerStarted","Data":"57f3ca6ac92b18e8bfa43c024c9f662a7e7fb469a07b9822166b549849291ac8"} Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.403687 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-5867565df6-2bb6n" 
podStartSLOduration=9.40366755 podStartE2EDuration="9.40366755s" podCreationTimestamp="2025-12-10 00:56:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:31.392150488 +0000 UTC m=+1564.470107615" watchObservedRunningTime="2025-12-10 00:56:31.40366755 +0000 UTC m=+1564.481624667" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.408648 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f64f-account-create-update-qtqz4" event={"ID":"4a5d45a0-51a8-4925-bbba-2b9d42ac9114","Type":"ContainerStarted","Data":"bdd473f8bd0441b20f04e9249aa99a41fb0850852f85e17fa80af369ddbcb3f7"} Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.412242 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-c589-account-create-update-c99cj" event={"ID":"bbf4c134-f53b-4613-bc36-92d89f55f8be","Type":"ContainerStarted","Data":"4baacdec18785b8a5f5b8f64d33567ba2c1ff2bef77bb3126d0206aee00e2f7c"} Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.415207 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" event={"ID":"644adf64-d8e7-40ff-8075-2f9595faa559","Type":"ContainerStarted","Data":"abf96baf556121e8ee26fee9715526ab1776b8da42c6823b98cb16c10544def9"} Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.415249 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.421645 4884 generic.go:334] "Generic (PLEG): container finished" podID="636dddde-3987-4843-917d-956aedd66a22" containerID="5a18b496c72d632255aaa36434d4eea0fa18107f47c02e03dd150602063028eb" exitCode=0 Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.421707 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-l2wlj" event={"ID":"636dddde-3987-4843-917d-956aedd66a22","Type":"ContainerDied","Data":"5a18b496c72d632255aaa36434d4eea0fa18107f47c02e03dd150602063028eb"} Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.426283 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-c847d79b6-kwhvk" event={"ID":"d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9","Type":"ContainerStarted","Data":"cc0311f0cb3fb4bb90b2144e1ceec81cfe814b7ece27b85be8898030bbd2cfba"} Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.426443 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.438479 4884 generic.go:334] "Generic (PLEG): container finished" podID="16359c7a-73d8-4c9b-bb45-2c81c5475330" containerID="d8146157788d7ae948e46a96497a7398e021c95dd7d40fe13f4c5126c166fdfd" exitCode=0 Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.438548 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-mgp4f" event={"ID":"16359c7a-73d8-4c9b-bb45-2c81c5475330","Type":"ContainerDied","Data":"d8146157788d7ae948e46a96497a7398e021c95dd7d40fe13f4c5126c166fdfd"} Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.443119 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" event={"ID":"73959270-9ff8-499d-8316-dd45b701ad6f","Type":"ContainerStarted","Data":"d30367d1f5b9acc69f71220425338a89e739d4a4c5a105b99a40a93b2bb5653d"} Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.443773 4884 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.444174 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-c589-account-create-update-c99cj" podStartSLOduration=8.444157377 podStartE2EDuration="8.444157377s" podCreationTimestamp="2025-12-10 00:56:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:31.433974761 +0000 UTC m=+1564.511931878" watchObservedRunningTime="2025-12-10 00:56:31.444157377 +0000 UTC m=+1564.522114494" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.457595 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-c847d79b6-kwhvk" podStartSLOduration=8.457576831 podStartE2EDuration="8.457576831s" podCreationTimestamp="2025-12-10 00:56:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:31.455277898 +0000 UTC m=+1564.533235035" watchObservedRunningTime="2025-12-10 00:56:31.457576831 +0000 UTC m=+1564.535533948" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.474268 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" podStartSLOduration=8.474250242 podStartE2EDuration="8.474250242s" podCreationTimestamp="2025-12-10 00:56:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:31.472622468 +0000 UTC m=+1564.550579595" watchObservedRunningTime="2025-12-10 00:56:31.474250242 +0000 UTC m=+1564.552207359" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.534493 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fzs89"] Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.543584 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" podStartSLOduration=9.543564909 podStartE2EDuration="9.543564909s" podCreationTimestamp="2025-12-10 00:56:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:31.526771344 +0000 UTC m=+1564.604728471" watchObservedRunningTime="2025-12-10 00:56:31.543564909 +0000 UTC m=+1564.621522026" Dec 10 00:56:31 crc kubenswrapper[4884]: I1210 00:56:31.568822 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:56:31 crc kubenswrapper[4884]: W1210 00:56:31.570495 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4518e9df_bc84_45c9_ae55_3af2cff56a19.slice/crio-a64e5122aa68fa67273d6c9d69df22714c7b7edca57e53eac8e31f82ee3afaa2 WatchSource:0}: Error finding container a64e5122aa68fa67273d6c9d69df22714c7b7edca57e53eac8e31f82ee3afaa2: Status 404 returned error can't find the container with id a64e5122aa68fa67273d6c9d69df22714c7b7edca57e53eac8e31f82ee3afaa2 Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.277971 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-321f-account-create-update-sgsmd" Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.394466 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhbq4\" (UniqueName: \"kubernetes.io/projected/253e3b48-23a7-4433-9f12-ae9901a3260d-kube-api-access-hhbq4\") pod \"253e3b48-23a7-4433-9f12-ae9901a3260d\" (UID: \"253e3b48-23a7-4433-9f12-ae9901a3260d\") " Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.394836 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/253e3b48-23a7-4433-9f12-ae9901a3260d-operator-scripts\") pod \"253e3b48-23a7-4433-9f12-ae9901a3260d\" (UID: \"253e3b48-23a7-4433-9f12-ae9901a3260d\") " Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.395400 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/253e3b48-23a7-4433-9f12-ae9901a3260d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "253e3b48-23a7-4433-9f12-ae9901a3260d" (UID: "253e3b48-23a7-4433-9f12-ae9901a3260d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.397520 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/253e3b48-23a7-4433-9f12-ae9901a3260d-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.419600 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/253e3b48-23a7-4433-9f12-ae9901a3260d-kube-api-access-hhbq4" (OuterVolumeSpecName: "kube-api-access-hhbq4") pod "253e3b48-23a7-4433-9f12-ae9901a3260d" (UID: "253e3b48-23a7-4433-9f12-ae9901a3260d"). InnerVolumeSpecName "kube-api-access-hhbq4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.489953 4884 generic.go:334] "Generic (PLEG): container finished" podID="589b62f4-a91b-43b0-b49b-b303dcba8a67" containerID="151889c10087002c761da4afeefc56fafbf5a2b7e1f1449d485d869860e106a4" exitCode=0 Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.490539 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-dbzrq" event={"ID":"589b62f4-a91b-43b0-b49b-b303dcba8a67","Type":"ContainerDied","Data":"151889c10087002c761da4afeefc56fafbf5a2b7e1f1449d485d869860e106a4"} Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.496172 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-6c876d64f-4c6rh" event={"ID":"c15434d2-166c-4919-ac7f-99ef8bf909b4","Type":"ContainerStarted","Data":"03812bb66ba39a1b9e74dc31fb2cee0ce9d07c49d143d0ed1a294962e603903e"} Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.497867 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.502251 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhbq4\" (UniqueName: \"kubernetes.io/projected/253e3b48-23a7-4433-9f12-ae9901a3260d-kube-api-access-hhbq4\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.519627 4884 generic.go:334] "Generic (PLEG): container finished" podID="9baa9257-9a08-427f-bdec-1fa28a81303f" containerID="8c6bef2d475c878cd33e3856ad8656debe5d60deda7809998e24cae73d581c21" exitCode=1 Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.519897 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5867565df6-2bb6n" event={"ID":"9baa9257-9a08-427f-bdec-1fa28a81303f","Type":"ContainerDied","Data":"8c6bef2d475c878cd33e3856ad8656debe5d60deda7809998e24cae73d581c21"} Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.520767 4884 scope.go:117] "RemoveContainer" containerID="8c6bef2d475c878cd33e3856ad8656debe5d60deda7809998e24cae73d581c21" Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.522518 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffc65c2a-883e-4939-8685-a0d77fd1c717","Type":"ContainerStarted","Data":"2aa7043edc526fac845b081dca5ecf1351756dbdf8416161b555ff9a7748ede0"} Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.529032 4884 generic.go:334] "Generic (PLEG): container finished" podID="4a5d45a0-51a8-4925-bbba-2b9d42ac9114" containerID="be45d21ccc0b4620e7e950ab8d945e211dca47942735dc09cecd61ad45e57e4f" exitCode=0 Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.529262 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f64f-account-create-update-qtqz4" event={"ID":"4a5d45a0-51a8-4925-bbba-2b9d42ac9114","Type":"ContainerDied","Data":"be45d21ccc0b4620e7e950ab8d945e211dca47942735dc09cecd61ad45e57e4f"} Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.534422 4884 generic.go:334] "Generic (PLEG): container finished" podID="bbf4c134-f53b-4613-bc36-92d89f55f8be" containerID="4baacdec18785b8a5f5b8f64d33567ba2c1ff2bef77bb3126d0206aee00e2f7c" exitCode=0 Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.534508 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-c589-account-create-update-c99cj" 
event={"ID":"bbf4c134-f53b-4613-bc36-92d89f55f8be","Type":"ContainerDied","Data":"4baacdec18785b8a5f5b8f64d33567ba2c1ff2bef77bb3126d0206aee00e2f7c"} Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.536247 4884 generic.go:334] "Generic (PLEG): container finished" podID="73959270-9ff8-499d-8316-dd45b701ad6f" containerID="d30367d1f5b9acc69f71220425338a89e739d4a4c5a105b99a40a93b2bb5653d" exitCode=1 Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.536297 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" event={"ID":"73959270-9ff8-499d-8316-dd45b701ad6f","Type":"ContainerDied","Data":"d30367d1f5b9acc69f71220425338a89e739d4a4c5a105b99a40a93b2bb5653d"} Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.537133 4884 scope.go:117] "RemoveContainer" containerID="d30367d1f5b9acc69f71220425338a89e739d4a4c5a105b99a40a93b2bb5653d" Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.540895 4884 generic.go:334] "Generic (PLEG): container finished" podID="4518e9df-bc84-45c9-ae55-3af2cff56a19" containerID="56f9b6221008ae374ce3b91142a02d2c5745a3cc0899006e0615c3b8c1037b1c" exitCode=0 Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.540941 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fzs89" event={"ID":"4518e9df-bc84-45c9-ae55-3af2cff56a19","Type":"ContainerDied","Data":"56f9b6221008ae374ce3b91142a02d2c5745a3cc0899006e0615c3b8c1037b1c"} Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.540959 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fzs89" event={"ID":"4518e9df-bc84-45c9-ae55-3af2cff56a19","Type":"ContainerStarted","Data":"a64e5122aa68fa67273d6c9d69df22714c7b7edca57e53eac8e31f82ee3afaa2"} Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.543692 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-321f-account-create-update-sgsmd" Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.547707 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-321f-account-create-update-sgsmd" event={"ID":"253e3b48-23a7-4433-9f12-ae9901a3260d","Type":"ContainerDied","Data":"675c72df13402d19b084abc2e9133a2beec115d9ace64a8dbdbde55f26d15905"} Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.547755 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="675c72df13402d19b084abc2e9133a2beec115d9ace64a8dbdbde55f26d15905" Dec 10 00:56:32 crc kubenswrapper[4884]: I1210 00:56:32.591090 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-6c876d64f-4c6rh" podStartSLOduration=10.591068476 podStartE2EDuration="10.591068476s" podCreationTimestamp="2025-12-10 00:56:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:56:32.548768671 +0000 UTC m=+1565.626725798" watchObservedRunningTime="2025-12-10 00:56:32.591068476 +0000 UTC m=+1565.669025593" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.284215 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-mgp4f" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.287720 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:56:33 crc kubenswrapper[4884]: E1210 00:56:33.288054 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.293354 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-l2wlj" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.419357 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/16359c7a-73d8-4c9b-bb45-2c81c5475330-operator-scripts\") pod \"16359c7a-73d8-4c9b-bb45-2c81c5475330\" (UID: \"16359c7a-73d8-4c9b-bb45-2c81c5475330\") " Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.419604 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-868mr\" (UniqueName: \"kubernetes.io/projected/16359c7a-73d8-4c9b-bb45-2c81c5475330-kube-api-access-868mr\") pod \"16359c7a-73d8-4c9b-bb45-2c81c5475330\" (UID: \"16359c7a-73d8-4c9b-bb45-2c81c5475330\") " Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.419681 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2qrt\" (UniqueName: \"kubernetes.io/projected/636dddde-3987-4843-917d-956aedd66a22-kube-api-access-p2qrt\") pod \"636dddde-3987-4843-917d-956aedd66a22\" (UID: \"636dddde-3987-4843-917d-956aedd66a22\") " Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.419783 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/636dddde-3987-4843-917d-956aedd66a22-operator-scripts\") pod \"636dddde-3987-4843-917d-956aedd66a22\" (UID: \"636dddde-3987-4843-917d-956aedd66a22\") " Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.420887 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/636dddde-3987-4843-917d-956aedd66a22-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "636dddde-3987-4843-917d-956aedd66a22" (UID: "636dddde-3987-4843-917d-956aedd66a22"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.421082 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16359c7a-73d8-4c9b-bb45-2c81c5475330-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "16359c7a-73d8-4c9b-bb45-2c81c5475330" (UID: "16359c7a-73d8-4c9b-bb45-2c81c5475330"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.423984 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/636dddde-3987-4843-917d-956aedd66a22-kube-api-access-p2qrt" (OuterVolumeSpecName: "kube-api-access-p2qrt") pod "636dddde-3987-4843-917d-956aedd66a22" (UID: "636dddde-3987-4843-917d-956aedd66a22"). InnerVolumeSpecName "kube-api-access-p2qrt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.424049 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16359c7a-73d8-4c9b-bb45-2c81c5475330-kube-api-access-868mr" (OuterVolumeSpecName: "kube-api-access-868mr") pod "16359c7a-73d8-4c9b-bb45-2c81c5475330" (UID: "16359c7a-73d8-4c9b-bb45-2c81c5475330"). InnerVolumeSpecName "kube-api-access-868mr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.522193 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/636dddde-3987-4843-917d-956aedd66a22-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.522508 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/16359c7a-73d8-4c9b-bb45-2c81c5475330-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.522533 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-868mr\" (UniqueName: \"kubernetes.io/projected/16359c7a-73d8-4c9b-bb45-2c81c5475330-kube-api-access-868mr\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.522552 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2qrt\" (UniqueName: \"kubernetes.io/projected/636dddde-3987-4843-917d-956aedd66a22-kube-api-access-p2qrt\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.565189 4884 generic.go:334] "Generic (PLEG): container finished" podID="9baa9257-9a08-427f-bdec-1fa28a81303f" containerID="b1ff2b7c8ccb1c6a9450addbd0b45a9673dc76ff8280b7e123b71a1411fbe766" exitCode=1 Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.565279 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5867565df6-2bb6n" event={"ID":"9baa9257-9a08-427f-bdec-1fa28a81303f","Type":"ContainerDied","Data":"b1ff2b7c8ccb1c6a9450addbd0b45a9673dc76ff8280b7e123b71a1411fbe766"} Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.565310 4884 scope.go:117] "RemoveContainer" containerID="8c6bef2d475c878cd33e3856ad8656debe5d60deda7809998e24cae73d581c21" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.565963 4884 scope.go:117] "RemoveContainer" containerID="b1ff2b7c8ccb1c6a9450addbd0b45a9673dc76ff8280b7e123b71a1411fbe766" Dec 10 00:56:33 crc kubenswrapper[4884]: E1210 00:56:33.566171 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5867565df6-2bb6n_openstack(9baa9257-9a08-427f-bdec-1fa28a81303f)\"" pod="openstack/heat-api-5867565df6-2bb6n" podUID="9baa9257-9a08-427f-bdec-1fa28a81303f" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.576104 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-db-create-mgp4f" event={"ID":"16359c7a-73d8-4c9b-bb45-2c81c5475330","Type":"ContainerDied","Data":"6df2bf5008a1d4aa4acaf0f1555f04911e13454b0a3cbf987740d06347795089"} Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.576142 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6df2bf5008a1d4aa4acaf0f1555f04911e13454b0a3cbf987740d06347795089" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.576194 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-mgp4f" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.593697 4884 generic.go:334] "Generic (PLEG): container finished" podID="73959270-9ff8-499d-8316-dd45b701ad6f" containerID="06114be5c443c45a6c604c1ec02a7ac8743a4478ab8ca27af85426ae92a0d9dc" exitCode=1 Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.593759 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" event={"ID":"73959270-9ff8-499d-8316-dd45b701ad6f","Type":"ContainerDied","Data":"06114be5c443c45a6c604c1ec02a7ac8743a4478ab8ca27af85426ae92a0d9dc"} Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.594372 4884 scope.go:117] "RemoveContainer" containerID="06114be5c443c45a6c604c1ec02a7ac8743a4478ab8ca27af85426ae92a0d9dc" Dec 10 00:56:33 crc kubenswrapper[4884]: E1210 00:56:33.594621 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-566fc6dbdb-qb9bs_openstack(73959270-9ff8-499d-8316-dd45b701ad6f)\"" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" podUID="73959270-9ff8-499d-8316-dd45b701ad6f" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.596870 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-l2wlj" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.597138 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-l2wlj" event={"ID":"636dddde-3987-4843-917d-956aedd66a22","Type":"ContainerDied","Data":"fdd2b39b84a0f4f66398498270c78c957a7df7c0d510dfca3f97238e24ed22e0"} Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.597167 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fdd2b39b84a0f4f66398498270c78c957a7df7c0d510dfca3f97238e24ed22e0" Dec 10 00:56:33 crc kubenswrapper[4884]: I1210 00:56:33.641220 4884 scope.go:117] "RemoveContainer" containerID="d30367d1f5b9acc69f71220425338a89e739d4a4c5a105b99a40a93b2bb5653d" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.047173 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f64f-account-create-update-qtqz4" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.153533 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a5d45a0-51a8-4925-bbba-2b9d42ac9114-operator-scripts\") pod \"4a5d45a0-51a8-4925-bbba-2b9d42ac9114\" (UID: \"4a5d45a0-51a8-4925-bbba-2b9d42ac9114\") " Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.153895 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqngz\" (UniqueName: \"kubernetes.io/projected/4a5d45a0-51a8-4925-bbba-2b9d42ac9114-kube-api-access-jqngz\") pod \"4a5d45a0-51a8-4925-bbba-2b9d42ac9114\" (UID: \"4a5d45a0-51a8-4925-bbba-2b9d42ac9114\") " Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.155714 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4a5d45a0-51a8-4925-bbba-2b9d42ac9114-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4a5d45a0-51a8-4925-bbba-2b9d42ac9114" (UID: "4a5d45a0-51a8-4925-bbba-2b9d42ac9114"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.169144 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a5d45a0-51a8-4925-bbba-2b9d42ac9114-kube-api-access-jqngz" (OuterVolumeSpecName: "kube-api-access-jqngz") pod "4a5d45a0-51a8-4925-bbba-2b9d42ac9114" (UID: "4a5d45a0-51a8-4925-bbba-2b9d42ac9114"). InnerVolumeSpecName "kube-api-access-jqngz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.258274 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqngz\" (UniqueName: \"kubernetes.io/projected/4a5d45a0-51a8-4925-bbba-2b9d42ac9114-kube-api-access-jqngz\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.258507 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4a5d45a0-51a8-4925-bbba-2b9d42ac9114-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:34 crc kubenswrapper[4884]: E1210 00:56:34.446241 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4518e9df_bc84_45c9_ae55_3af2cff56a19.slice/crio-56a3b8fbd64acee14002b0b04e72a9b51f8785a8dc7a25b1d052ab6da0edb1d5.scope\": RecentStats: unable to find data in memory cache]" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.521524 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-dbzrq" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.529585 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-c589-account-create-update-c99cj" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.618027 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f64f-account-create-update-qtqz4" event={"ID":"4a5d45a0-51a8-4925-bbba-2b9d42ac9114","Type":"ContainerDied","Data":"bdd473f8bd0441b20f04e9249aa99a41fb0850852f85e17fa80af369ddbcb3f7"} Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.618072 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bdd473f8bd0441b20f04e9249aa99a41fb0850852f85e17fa80af369ddbcb3f7" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.618159 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f64f-account-create-update-qtqz4" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.639686 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-c589-account-create-update-c99cj" event={"ID":"bbf4c134-f53b-4613-bc36-92d89f55f8be","Type":"ContainerDied","Data":"b3d10f490f060ee20ce474dae1ae0da53ed0ad0e7fc4231c7227e34e966846c5"} Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.639734 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3d10f490f060ee20ce474dae1ae0da53ed0ad0e7fc4231c7227e34e966846c5" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.639795 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-c589-account-create-update-c99cj" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.659548 4884 scope.go:117] "RemoveContainer" containerID="06114be5c443c45a6c604c1ec02a7ac8743a4478ab8ca27af85426ae92a0d9dc" Dec 10 00:56:34 crc kubenswrapper[4884]: E1210 00:56:34.659758 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-566fc6dbdb-qb9bs_openstack(73959270-9ff8-499d-8316-dd45b701ad6f)\"" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" podUID="73959270-9ff8-499d-8316-dd45b701ad6f" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.674318 4884 generic.go:334] "Generic (PLEG): container finished" podID="4518e9df-bc84-45c9-ae55-3af2cff56a19" containerID="56a3b8fbd64acee14002b0b04e72a9b51f8785a8dc7a25b1d052ab6da0edb1d5" exitCode=0 Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.674678 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fzs89" event={"ID":"4518e9df-bc84-45c9-ae55-3af2cff56a19","Type":"ContainerDied","Data":"56a3b8fbd64acee14002b0b04e72a9b51f8785a8dc7a25b1d052ab6da0edb1d5"} Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.678984 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/589b62f4-a91b-43b0-b49b-b303dcba8a67-operator-scripts\") pod \"589b62f4-a91b-43b0-b49b-b303dcba8a67\" (UID: \"589b62f4-a91b-43b0-b49b-b303dcba8a67\") " Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.679145 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t47sz\" (UniqueName: \"kubernetes.io/projected/589b62f4-a91b-43b0-b49b-b303dcba8a67-kube-api-access-t47sz\") pod \"589b62f4-a91b-43b0-b49b-b303dcba8a67\" (UID: \"589b62f4-a91b-43b0-b49b-b303dcba8a67\") " Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.679216 
4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7dr4\" (UniqueName: \"kubernetes.io/projected/bbf4c134-f53b-4613-bc36-92d89f55f8be-kube-api-access-f7dr4\") pod \"bbf4c134-f53b-4613-bc36-92d89f55f8be\" (UID: \"bbf4c134-f53b-4613-bc36-92d89f55f8be\") " Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.679463 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbf4c134-f53b-4613-bc36-92d89f55f8be-operator-scripts\") pod \"bbf4c134-f53b-4613-bc36-92d89f55f8be\" (UID: \"bbf4c134-f53b-4613-bc36-92d89f55f8be\") " Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.680717 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/589b62f4-a91b-43b0-b49b-b303dcba8a67-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "589b62f4-a91b-43b0-b49b-b303dcba8a67" (UID: "589b62f4-a91b-43b0-b49b-b303dcba8a67"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.680978 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/589b62f4-a91b-43b0-b49b-b303dcba8a67-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.680727 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bbf4c134-f53b-4613-bc36-92d89f55f8be-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bbf4c134-f53b-4613-bc36-92d89f55f8be" (UID: "bbf4c134-f53b-4613-bc36-92d89f55f8be"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.683919 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-dbzrq" event={"ID":"589b62f4-a91b-43b0-b49b-b303dcba8a67","Type":"ContainerDied","Data":"93ebeb68d09c84454c9874bc8eb1e771d52af1bbf7745215019b84f253b8833f"} Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.684028 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93ebeb68d09c84454c9874bc8eb1e771d52af1bbf7745215019b84f253b8833f" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.684161 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-dbzrq" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.689599 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/589b62f4-a91b-43b0-b49b-b303dcba8a67-kube-api-access-t47sz" (OuterVolumeSpecName: "kube-api-access-t47sz") pod "589b62f4-a91b-43b0-b49b-b303dcba8a67" (UID: "589b62f4-a91b-43b0-b49b-b303dcba8a67"). InnerVolumeSpecName "kube-api-access-t47sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.693191 4884 scope.go:117] "RemoveContainer" containerID="b1ff2b7c8ccb1c6a9450addbd0b45a9673dc76ff8280b7e123b71a1411fbe766" Dec 10 00:56:34 crc kubenswrapper[4884]: E1210 00:56:34.693398 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5867565df6-2bb6n_openstack(9baa9257-9a08-427f-bdec-1fa28a81303f)\"" pod="openstack/heat-api-5867565df6-2bb6n" podUID="9baa9257-9a08-427f-bdec-1fa28a81303f" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.696255 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffc65c2a-883e-4939-8685-a0d77fd1c717","Type":"ContainerStarted","Data":"4675e92b33c87f5c40458ff6f512488f0aaf538368438733586e9804d405fc1b"} Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.696609 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbf4c134-f53b-4613-bc36-92d89f55f8be-kube-api-access-f7dr4" (OuterVolumeSpecName: "kube-api-access-f7dr4") pod "bbf4c134-f53b-4613-bc36-92d89f55f8be" (UID: "bbf4c134-f53b-4613-bc36-92d89f55f8be"). InnerVolumeSpecName "kube-api-access-f7dr4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.787960 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbf4c134-f53b-4613-bc36-92d89f55f8be-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.787995 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t47sz\" (UniqueName: \"kubernetes.io/projected/589b62f4-a91b-43b0-b49b-b303dcba8a67-kube-api-access-t47sz\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.788006 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7dr4\" (UniqueName: \"kubernetes.io/projected/bbf4c134-f53b-4613-bc36-92d89f55f8be-kube-api-access-f7dr4\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:34 crc kubenswrapper[4884]: I1210 00:56:34.850556 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:36 crc kubenswrapper[4884]: I1210 00:56:36.718592 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffc65c2a-883e-4939-8685-a0d77fd1c717","Type":"ContainerStarted","Data":"4a44aac043390f7847136233ea4c21a4c19cd7cde2b55da6bb01ed75fe7a6ae9"} Dec 10 00:56:37 crc kubenswrapper[4884]: I1210 00:56:37.439480 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:37 crc kubenswrapper[4884]: I1210 00:56:37.439741 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:37 crc kubenswrapper[4884]: I1210 00:56:37.440498 4884 scope.go:117] "RemoveContainer" containerID="06114be5c443c45a6c604c1ec02a7ac8743a4478ab8ca27af85426ae92a0d9dc" Dec 10 00:56:37 crc kubenswrapper[4884]: E1210 00:56:37.440729 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi 
pod=heat-cfnapi-566fc6dbdb-qb9bs_openstack(73959270-9ff8-499d-8316-dd45b701ad6f)\"" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" podUID="73959270-9ff8-499d-8316-dd45b701ad6f" Dec 10 00:56:37 crc kubenswrapper[4884]: I1210 00:56:37.455476 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:37 crc kubenswrapper[4884]: I1210 00:56:37.455520 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:37 crc kubenswrapper[4884]: I1210 00:56:37.456276 4884 scope.go:117] "RemoveContainer" containerID="b1ff2b7c8ccb1c6a9450addbd0b45a9673dc76ff8280b7e123b71a1411fbe766" Dec 10 00:56:37 crc kubenswrapper[4884]: E1210 00:56:37.456522 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-5867565df6-2bb6n_openstack(9baa9257-9a08-427f-bdec-1fa28a81303f)\"" pod="openstack/heat-api-5867565df6-2bb6n" podUID="9baa9257-9a08-427f-bdec-1fa28a81303f" Dec 10 00:56:37 crc kubenswrapper[4884]: I1210 00:56:37.728337 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fzs89" event={"ID":"4518e9df-bc84-45c9-ae55-3af2cff56a19","Type":"ContainerStarted","Data":"2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008"} Dec 10 00:56:37 crc kubenswrapper[4884]: I1210 00:56:37.747596 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fzs89" podStartSLOduration=4.446219074 podStartE2EDuration="8.747581669s" podCreationTimestamp="2025-12-10 00:56:29 +0000 UTC" firstStartedPulling="2025-12-10 00:56:32.5738595 +0000 UTC m=+1565.651816617" lastFinishedPulling="2025-12-10 00:56:36.875222095 +0000 UTC m=+1569.953179212" observedRunningTime="2025-12-10 00:56:37.744381672 +0000 UTC m=+1570.822338789" watchObservedRunningTime="2025-12-10 00:56:37.747581669 +0000 UTC m=+1570.825538786" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.476717 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-f9dh7"] Dec 10 00:56:38 crc kubenswrapper[4884]: E1210 00:56:38.477115 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="253e3b48-23a7-4433-9f12-ae9901a3260d" containerName="mariadb-account-create-update" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.477133 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="253e3b48-23a7-4433-9f12-ae9901a3260d" containerName="mariadb-account-create-update" Dec 10 00:56:38 crc kubenswrapper[4884]: E1210 00:56:38.477147 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a5d45a0-51a8-4925-bbba-2b9d42ac9114" containerName="mariadb-account-create-update" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.477155 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a5d45a0-51a8-4925-bbba-2b9d42ac9114" containerName="mariadb-account-create-update" Dec 10 00:56:38 crc kubenswrapper[4884]: E1210 00:56:38.477168 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbf4c134-f53b-4613-bc36-92d89f55f8be" containerName="mariadb-account-create-update" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.477175 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbf4c134-f53b-4613-bc36-92d89f55f8be" containerName="mariadb-account-create-update" Dec 10 00:56:38 crc 
kubenswrapper[4884]: E1210 00:56:38.477188 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="636dddde-3987-4843-917d-956aedd66a22" containerName="mariadb-database-create" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.477194 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="636dddde-3987-4843-917d-956aedd66a22" containerName="mariadb-database-create" Dec 10 00:56:38 crc kubenswrapper[4884]: E1210 00:56:38.477205 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="589b62f4-a91b-43b0-b49b-b303dcba8a67" containerName="mariadb-database-create" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.477211 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="589b62f4-a91b-43b0-b49b-b303dcba8a67" containerName="mariadb-database-create" Dec 10 00:56:38 crc kubenswrapper[4884]: E1210 00:56:38.477240 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16359c7a-73d8-4c9b-bb45-2c81c5475330" containerName="mariadb-database-create" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.477247 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="16359c7a-73d8-4c9b-bb45-2c81c5475330" containerName="mariadb-database-create" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.477410 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="16359c7a-73d8-4c9b-bb45-2c81c5475330" containerName="mariadb-database-create" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.477421 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="589b62f4-a91b-43b0-b49b-b303dcba8a67" containerName="mariadb-database-create" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.477449 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="636dddde-3987-4843-917d-956aedd66a22" containerName="mariadb-database-create" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.477463 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="253e3b48-23a7-4433-9f12-ae9901a3260d" containerName="mariadb-account-create-update" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.477478 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a5d45a0-51a8-4925-bbba-2b9d42ac9114" containerName="mariadb-account-create-update" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.477488 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbf4c134-f53b-4613-bc36-92d89f55f8be" containerName="mariadb-account-create-update" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.479690 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.483517 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.483893 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.498796 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mdmcl" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.503721 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-f9dh7"] Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.696856 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-config-data\") pod \"nova-cell0-conductor-db-sync-f9dh7\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.696904 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-f9dh7\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.697018 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-scripts\") pod \"nova-cell0-conductor-db-sync-f9dh7\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.697046 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvzpr\" (UniqueName: \"kubernetes.io/projected/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-kube-api-access-jvzpr\") pod \"nova-cell0-conductor-db-sync-f9dh7\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.799052 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-scripts\") pod \"nova-cell0-conductor-db-sync-f9dh7\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.799122 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvzpr\" (UniqueName: \"kubernetes.io/projected/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-kube-api-access-jvzpr\") pod \"nova-cell0-conductor-db-sync-f9dh7\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.799215 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-config-data\") pod \"nova-cell0-conductor-db-sync-f9dh7\" (UID: 
\"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.799243 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-f9dh7\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.804232 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-f9dh7\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.815064 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-scripts\") pod \"nova-cell0-conductor-db-sync-f9dh7\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.815364 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-config-data\") pod \"nova-cell0-conductor-db-sync-f9dh7\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:38 crc kubenswrapper[4884]: I1210 00:56:38.820298 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvzpr\" (UniqueName: \"kubernetes.io/projected/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-kube-api-access-jvzpr\") pod \"nova-cell0-conductor-db-sync-f9dh7\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:39 crc kubenswrapper[4884]: I1210 00:56:39.098689 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:56:39 crc kubenswrapper[4884]: I1210 00:56:39.641259 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-f9dh7"] Dec 10 00:56:39 crc kubenswrapper[4884]: I1210 00:56:39.747324 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffc65c2a-883e-4939-8685-a0d77fd1c717","Type":"ContainerStarted","Data":"0e46244df3dd76d9a6626efc1a2e6ebc4991f71ce466cf7df69cb6dab9641b27"} Dec 10 00:56:39 crc kubenswrapper[4884]: I1210 00:56:39.747510 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="ceilometer-central-agent" containerID="cri-o://2aa7043edc526fac845b081dca5ecf1351756dbdf8416161b555ff9a7748ede0" gracePeriod=30 Dec 10 00:56:39 crc kubenswrapper[4884]: I1210 00:56:39.747731 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 00:56:39 crc kubenswrapper[4884]: I1210 00:56:39.747967 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="proxy-httpd" containerID="cri-o://0e46244df3dd76d9a6626efc1a2e6ebc4991f71ce466cf7df69cb6dab9641b27" gracePeriod=30 Dec 10 00:56:39 crc kubenswrapper[4884]: I1210 00:56:39.748015 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="sg-core" containerID="cri-o://4a44aac043390f7847136233ea4c21a4c19cd7cde2b55da6bb01ed75fe7a6ae9" gracePeriod=30 Dec 10 00:56:39 crc kubenswrapper[4884]: I1210 00:56:39.748046 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="ceilometer-notification-agent" containerID="cri-o://4675e92b33c87f5c40458ff6f512488f0aaf538368438733586e9804d405fc1b" gracePeriod=30 Dec 10 00:56:39 crc kubenswrapper[4884]: I1210 00:56:39.753858 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-f9dh7" event={"ID":"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670","Type":"ContainerStarted","Data":"31d275fe4e436dcd73ea923d457d6c1fb1e78e6fc3caca1a35bafde7311c943a"} Dec 10 00:56:39 crc kubenswrapper[4884]: I1210 00:56:39.788083 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.969987326 podStartE2EDuration="10.788064306s" podCreationTimestamp="2025-12-10 00:56:29 +0000 UTC" firstStartedPulling="2025-12-10 00:56:30.729258007 +0000 UTC m=+1563.807215124" lastFinishedPulling="2025-12-10 00:56:38.547334987 +0000 UTC m=+1571.625292104" observedRunningTime="2025-12-10 00:56:39.775650081 +0000 UTC m=+1572.853607208" watchObservedRunningTime="2025-12-10 00:56:39.788064306 +0000 UTC m=+1572.866021423" Dec 10 00:56:40 crc kubenswrapper[4884]: I1210 00:56:40.774863 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:40 crc kubenswrapper[4884]: I1210 00:56:40.775122 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:40 crc kubenswrapper[4884]: I1210 00:56:40.788775 4884 generic.go:334] "Generic (PLEG): container finished" 
podID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerID="0e46244df3dd76d9a6626efc1a2e6ebc4991f71ce466cf7df69cb6dab9641b27" exitCode=0 Dec 10 00:56:40 crc kubenswrapper[4884]: I1210 00:56:40.788802 4884 generic.go:334] "Generic (PLEG): container finished" podID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerID="4a44aac043390f7847136233ea4c21a4c19cd7cde2b55da6bb01ed75fe7a6ae9" exitCode=2 Dec 10 00:56:40 crc kubenswrapper[4884]: I1210 00:56:40.788810 4884 generic.go:334] "Generic (PLEG): container finished" podID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerID="4675e92b33c87f5c40458ff6f512488f0aaf538368438733586e9804d405fc1b" exitCode=0 Dec 10 00:56:40 crc kubenswrapper[4884]: I1210 00:56:40.788828 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffc65c2a-883e-4939-8685-a0d77fd1c717","Type":"ContainerDied","Data":"0e46244df3dd76d9a6626efc1a2e6ebc4991f71ce466cf7df69cb6dab9641b27"} Dec 10 00:56:40 crc kubenswrapper[4884]: I1210 00:56:40.788852 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffc65c2a-883e-4939-8685-a0d77fd1c717","Type":"ContainerDied","Data":"4a44aac043390f7847136233ea4c21a4c19cd7cde2b55da6bb01ed75fe7a6ae9"} Dec 10 00:56:40 crc kubenswrapper[4884]: I1210 00:56:40.788864 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffc65c2a-883e-4939-8685-a0d77fd1c717","Type":"ContainerDied","Data":"4675e92b33c87f5c40458ff6f512488f0aaf538368438733586e9804d405fc1b"} Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.067223 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-56f5c7d86c-hthhq" Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.149556 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-566fc6dbdb-qb9bs"] Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.312251 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-c847d79b6-kwhvk" Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.402102 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5867565df6-2bb6n"] Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.715119 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.818700 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" event={"ID":"73959270-9ff8-499d-8316-dd45b701ad6f","Type":"ContainerDied","Data":"c9aeae66de7cbdb697579c61388ddf2ef9b3f5c128142a53b65a142e3382bcb6"} Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.818762 4884 scope.go:117] "RemoveContainer" containerID="06114be5c443c45a6c604c1ec02a7ac8743a4478ab8ca27af85426ae92a0d9dc" Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.818878 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-566fc6dbdb-qb9bs" Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.846323 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-fzs89" podUID="4518e9df-bc84-45c9-ae55-3af2cff56a19" containerName="registry-server" probeResult="failure" output=< Dec 10 00:56:41 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 00:56:41 crc kubenswrapper[4884]: > Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.875089 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nt6dx\" (UniqueName: \"kubernetes.io/projected/73959270-9ff8-499d-8316-dd45b701ad6f-kube-api-access-nt6dx\") pod \"73959270-9ff8-499d-8316-dd45b701ad6f\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.875222 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-combined-ca-bundle\") pod \"73959270-9ff8-499d-8316-dd45b701ad6f\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.875245 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-config-data-custom\") pod \"73959270-9ff8-499d-8316-dd45b701ad6f\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.875273 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-config-data\") pod \"73959270-9ff8-499d-8316-dd45b701ad6f\" (UID: \"73959270-9ff8-499d-8316-dd45b701ad6f\") " Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.885954 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "73959270-9ff8-499d-8316-dd45b701ad6f" (UID: "73959270-9ff8-499d-8316-dd45b701ad6f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.892889 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73959270-9ff8-499d-8316-dd45b701ad6f-kube-api-access-nt6dx" (OuterVolumeSpecName: "kube-api-access-nt6dx") pod "73959270-9ff8-499d-8316-dd45b701ad6f" (UID: "73959270-9ff8-499d-8316-dd45b701ad6f"). InnerVolumeSpecName "kube-api-access-nt6dx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.939196 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "73959270-9ff8-499d-8316-dd45b701ad6f" (UID: "73959270-9ff8-499d-8316-dd45b701ad6f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.949677 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-config-data" (OuterVolumeSpecName: "config-data") pod "73959270-9ff8-499d-8316-dd45b701ad6f" (UID: "73959270-9ff8-499d-8316-dd45b701ad6f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.977322 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nt6dx\" (UniqueName: \"kubernetes.io/projected/73959270-9ff8-499d-8316-dd45b701ad6f-kube-api-access-nt6dx\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.977356 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.977364 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:41 crc kubenswrapper[4884]: I1210 00:56:41.977373 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73959270-9ff8-499d-8316-dd45b701ad6f-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.023742 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.149788 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-566fc6dbdb-qb9bs"] Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.159676 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-566fc6dbdb-qb9bs"] Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.179990 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-config-data-custom\") pod \"9baa9257-9a08-427f-bdec-1fa28a81303f\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.180297 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-combined-ca-bundle\") pod \"9baa9257-9a08-427f-bdec-1fa28a81303f\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.180456 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8wwl\" (UniqueName: \"kubernetes.io/projected/9baa9257-9a08-427f-bdec-1fa28a81303f-kube-api-access-j8wwl\") pod \"9baa9257-9a08-427f-bdec-1fa28a81303f\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.180554 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-config-data\") pod \"9baa9257-9a08-427f-bdec-1fa28a81303f\" (UID: \"9baa9257-9a08-427f-bdec-1fa28a81303f\") " Dec 10 00:56:42 crc kubenswrapper[4884]: 
I1210 00:56:42.183288 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9baa9257-9a08-427f-bdec-1fa28a81303f" (UID: "9baa9257-9a08-427f-bdec-1fa28a81303f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.183708 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9baa9257-9a08-427f-bdec-1fa28a81303f-kube-api-access-j8wwl" (OuterVolumeSpecName: "kube-api-access-j8wwl") pod "9baa9257-9a08-427f-bdec-1fa28a81303f" (UID: "9baa9257-9a08-427f-bdec-1fa28a81303f"). InnerVolumeSpecName "kube-api-access-j8wwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.207480 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9baa9257-9a08-427f-bdec-1fa28a81303f" (UID: "9baa9257-9a08-427f-bdec-1fa28a81303f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.237703 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-config-data" (OuterVolumeSpecName: "config-data") pod "9baa9257-9a08-427f-bdec-1fa28a81303f" (UID: "9baa9257-9a08-427f-bdec-1fa28a81303f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.283789 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8wwl\" (UniqueName: \"kubernetes.io/projected/9baa9257-9a08-427f-bdec-1fa28a81303f-kube-api-access-j8wwl\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.283831 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.283847 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.283860 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9baa9257-9a08-427f-bdec-1fa28a81303f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.438132 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-6c876d64f-4c6rh" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.493346 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-58c87ffb89-4m2v2"] Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.493567 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-58c87ffb89-4m2v2" podUID="9eff8542-e352-4f68-bb2b-aa02df1e06f8" containerName="heat-engine" containerID="cri-o://77f7d30e110d99a17d1e200c8fcc890cd15ec0c5fe48e0b7a6db365309eae252" gracePeriod=60 Dec 10 00:56:42 
crc kubenswrapper[4884]: I1210 00:56:42.829626 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-5867565df6-2bb6n" event={"ID":"9baa9257-9a08-427f-bdec-1fa28a81303f","Type":"ContainerDied","Data":"c8d87fcda98a938f28cbc723669b880201a75fadf14823e8b5c8fa6076c8e8ea"} Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.830475 4884 scope.go:117] "RemoveContainer" containerID="b1ff2b7c8ccb1c6a9450addbd0b45a9673dc76ff8280b7e123b71a1411fbe766" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.829884 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-5867565df6-2bb6n" Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.910796 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-5867565df6-2bb6n"] Dec 10 00:56:42 crc kubenswrapper[4884]: I1210 00:56:42.920897 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-5867565df6-2bb6n"] Dec 10 00:56:43 crc kubenswrapper[4884]: I1210 00:56:43.299728 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73959270-9ff8-499d-8316-dd45b701ad6f" path="/var/lib/kubelet/pods/73959270-9ff8-499d-8316-dd45b701ad6f/volumes" Dec 10 00:56:43 crc kubenswrapper[4884]: I1210 00:56:43.300364 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9baa9257-9a08-427f-bdec-1fa28a81303f" path="/var/lib/kubelet/pods/9baa9257-9a08-427f-bdec-1fa28a81303f/volumes" Dec 10 00:56:44 crc kubenswrapper[4884]: E1210 00:56:44.800022 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="77f7d30e110d99a17d1e200c8fcc890cd15ec0c5fe48e0b7a6db365309eae252" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 10 00:56:44 crc kubenswrapper[4884]: E1210 00:56:44.805535 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="77f7d30e110d99a17d1e200c8fcc890cd15ec0c5fe48e0b7a6db365309eae252" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 10 00:56:44 crc kubenswrapper[4884]: E1210 00:56:44.813641 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="77f7d30e110d99a17d1e200c8fcc890cd15ec0c5fe48e0b7a6db365309eae252" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Dec 10 00:56:44 crc kubenswrapper[4884]: E1210 00:56:44.813689 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-58c87ffb89-4m2v2" podUID="9eff8542-e352-4f68-bb2b-aa02df1e06f8" containerName="heat-engine" Dec 10 00:56:45 crc kubenswrapper[4884]: I1210 00:56:45.891470 4884 generic.go:334] "Generic (PLEG): container finished" podID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerID="2aa7043edc526fac845b081dca5ecf1351756dbdf8416161b555ff9a7748ede0" exitCode=0 Dec 10 00:56:45 crc kubenswrapper[4884]: I1210 00:56:45.891581 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"ffc65c2a-883e-4939-8685-a0d77fd1c717","Type":"ContainerDied","Data":"2aa7043edc526fac845b081dca5ecf1351756dbdf8416161b555ff9a7748ede0"} Dec 10 00:56:48 crc kubenswrapper[4884]: I1210 00:56:48.288180 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:56:48 crc kubenswrapper[4884]: E1210 00:56:48.289241 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:56:50 crc kubenswrapper[4884]: I1210 00:56:50.883008 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:50 crc kubenswrapper[4884]: I1210 00:56:50.933106 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.133052 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fzs89"] Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.173123 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.257154 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-combined-ca-bundle\") pod \"ffc65c2a-883e-4939-8685-a0d77fd1c717\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.257392 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cd2m8\" (UniqueName: \"kubernetes.io/projected/ffc65c2a-883e-4939-8685-a0d77fd1c717-kube-api-access-cd2m8\") pod \"ffc65c2a-883e-4939-8685-a0d77fd1c717\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.257504 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-config-data\") pod \"ffc65c2a-883e-4939-8685-a0d77fd1c717\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.257596 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-sg-core-conf-yaml\") pod \"ffc65c2a-883e-4939-8685-a0d77fd1c717\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.257628 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-scripts\") pod \"ffc65c2a-883e-4939-8685-a0d77fd1c717\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.257672 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/ffc65c2a-883e-4939-8685-a0d77fd1c717-run-httpd\") pod \"ffc65c2a-883e-4939-8685-a0d77fd1c717\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.257766 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffc65c2a-883e-4939-8685-a0d77fd1c717-log-httpd\") pod \"ffc65c2a-883e-4939-8685-a0d77fd1c717\" (UID: \"ffc65c2a-883e-4939-8685-a0d77fd1c717\") " Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.258424 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffc65c2a-883e-4939-8685-a0d77fd1c717-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ffc65c2a-883e-4939-8685-a0d77fd1c717" (UID: "ffc65c2a-883e-4939-8685-a0d77fd1c717"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.258547 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffc65c2a-883e-4939-8685-a0d77fd1c717-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ffc65c2a-883e-4939-8685-a0d77fd1c717" (UID: "ffc65c2a-883e-4939-8685-a0d77fd1c717"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.263017 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffc65c2a-883e-4939-8685-a0d77fd1c717-kube-api-access-cd2m8" (OuterVolumeSpecName: "kube-api-access-cd2m8") pod "ffc65c2a-883e-4939-8685-a0d77fd1c717" (UID: "ffc65c2a-883e-4939-8685-a0d77fd1c717"). InnerVolumeSpecName "kube-api-access-cd2m8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.267627 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-scripts" (OuterVolumeSpecName: "scripts") pod "ffc65c2a-883e-4939-8685-a0d77fd1c717" (UID: "ffc65c2a-883e-4939-8685-a0d77fd1c717"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.322571 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ffc65c2a-883e-4939-8685-a0d77fd1c717" (UID: "ffc65c2a-883e-4939-8685-a0d77fd1c717"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.359412 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cd2m8\" (UniqueName: \"kubernetes.io/projected/ffc65c2a-883e-4939-8685-a0d77fd1c717-kube-api-access-cd2m8\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.359452 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.359463 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.359472 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffc65c2a-883e-4939-8685-a0d77fd1c717-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.359481 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ffc65c2a-883e-4939-8685-a0d77fd1c717-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.406624 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-config-data" (OuterVolumeSpecName: "config-data") pod "ffc65c2a-883e-4939-8685-a0d77fd1c717" (UID: "ffc65c2a-883e-4939-8685-a0d77fd1c717"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.407055 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ffc65c2a-883e-4939-8685-a0d77fd1c717" (UID: "ffc65c2a-883e-4939-8685-a0d77fd1c717"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.461118 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.461156 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffc65c2a-883e-4939-8685-a0d77fd1c717-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.953005 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ffc65c2a-883e-4939-8685-a0d77fd1c717","Type":"ContainerDied","Data":"57f3ca6ac92b18e8bfa43c024c9f662a7e7fb469a07b9822166b549849291ac8"} Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.953504 4884 scope.go:117] "RemoveContainer" containerID="0e46244df3dd76d9a6626efc1a2e6ebc4991f71ce466cf7df69cb6dab9641b27" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.954037 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.956405 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-f9dh7" event={"ID":"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670","Type":"ContainerStarted","Data":"5cda245efda47526d292cbb1c535c23938a0cab0db243ee95d554069dae871bf"} Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.956499 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fzs89" podUID="4518e9df-bc84-45c9-ae55-3af2cff56a19" containerName="registry-server" containerID="cri-o://2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008" gracePeriod=2 Dec 10 00:56:51 crc kubenswrapper[4884]: I1210 00:56:51.985612 4884 scope.go:117] "RemoveContainer" containerID="4a44aac043390f7847136233ea4c21a4c19cd7cde2b55da6bb01ed75fe7a6ae9" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.012057 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-f9dh7" podStartSLOduration=2.5822953650000002 podStartE2EDuration="14.012031281s" podCreationTimestamp="2025-12-10 00:56:38 +0000 UTC" firstStartedPulling="2025-12-10 00:56:39.659076903 +0000 UTC m=+1572.737034020" lastFinishedPulling="2025-12-10 00:56:51.088812819 +0000 UTC m=+1584.166769936" observedRunningTime="2025-12-10 00:56:51.988217536 +0000 UTC m=+1585.066174653" watchObservedRunningTime="2025-12-10 00:56:52.012031281 +0000 UTC m=+1585.089988398" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.012365 4884 scope.go:117] "RemoveContainer" containerID="4675e92b33c87f5c40458ff6f512488f0aaf538368438733586e9804d405fc1b" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.014852 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.029221 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.038227 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:56:52 crc kubenswrapper[4884]: E1210 00:56:52.038945 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9baa9257-9a08-427f-bdec-1fa28a81303f" containerName="heat-api" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.038962 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9baa9257-9a08-427f-bdec-1fa28a81303f" containerName="heat-api" Dec 10 00:56:52 crc kubenswrapper[4884]: E1210 00:56:52.038980 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73959270-9ff8-499d-8316-dd45b701ad6f" containerName="heat-cfnapi" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.038988 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="73959270-9ff8-499d-8316-dd45b701ad6f" containerName="heat-cfnapi" Dec 10 00:56:52 crc kubenswrapper[4884]: E1210 00:56:52.039002 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="ceilometer-central-agent" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039008 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="ceilometer-central-agent" Dec 10 00:56:52 crc kubenswrapper[4884]: E1210 00:56:52.039018 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" 
containerName="ceilometer-notification-agent" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039026 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="ceilometer-notification-agent" Dec 10 00:56:52 crc kubenswrapper[4884]: E1210 00:56:52.039041 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="sg-core" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039046 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="sg-core" Dec 10 00:56:52 crc kubenswrapper[4884]: E1210 00:56:52.039059 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="proxy-httpd" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039065 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="proxy-httpd" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039386 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="73959270-9ff8-499d-8316-dd45b701ad6f" containerName="heat-cfnapi" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039416 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="ceilometer-notification-agent" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039442 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="sg-core" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039453 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9baa9257-9a08-427f-bdec-1fa28a81303f" containerName="heat-api" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039468 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="ceilometer-central-agent" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039473 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9baa9257-9a08-427f-bdec-1fa28a81303f" containerName="heat-api" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039484 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" containerName="proxy-httpd" Dec 10 00:56:52 crc kubenswrapper[4884]: E1210 00:56:52.039672 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9baa9257-9a08-427f-bdec-1fa28a81303f" containerName="heat-api" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039680 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="9baa9257-9a08-427f-bdec-1fa28a81303f" containerName="heat-api" Dec 10 00:56:52 crc kubenswrapper[4884]: E1210 00:56:52.039694 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73959270-9ff8-499d-8316-dd45b701ad6f" containerName="heat-cfnapi" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039701 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="73959270-9ff8-499d-8316-dd45b701ad6f" containerName="heat-cfnapi" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.039916 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="73959270-9ff8-499d-8316-dd45b701ad6f" containerName="heat-cfnapi" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.040003 4884 scope.go:117] "RemoveContainer" 
containerID="2aa7043edc526fac845b081dca5ecf1351756dbdf8416161b555ff9a7748ede0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.041357 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.043976 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.044409 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.051789 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.172586 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-config-data\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.172635 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.172821 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.172921 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-scripts\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.173070 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4z6q\" (UniqueName: \"kubernetes.io/projected/49b7e398-14c9-45e9-abdb-31c0431dee66-kube-api-access-v4z6q\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.173181 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49b7e398-14c9-45e9-abdb-31c0431dee66-run-httpd\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.173228 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49b7e398-14c9-45e9-abdb-31c0431dee66-log-httpd\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.274396 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.274495 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-scripts\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.274553 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4z6q\" (UniqueName: \"kubernetes.io/projected/49b7e398-14c9-45e9-abdb-31c0431dee66-kube-api-access-v4z6q\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.274601 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49b7e398-14c9-45e9-abdb-31c0431dee66-run-httpd\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.274622 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49b7e398-14c9-45e9-abdb-31c0431dee66-log-httpd\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.274652 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-config-data\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.274672 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.284119 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.284389 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49b7e398-14c9-45e9-abdb-31c0431dee66-run-httpd\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.284633 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49b7e398-14c9-45e9-abdb-31c0431dee66-log-httpd\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.286678 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.289280 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-config-data\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.302857 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-scripts\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.319195 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4z6q\" (UniqueName: \"kubernetes.io/projected/49b7e398-14c9-45e9-abdb-31c0431dee66-kube-api-access-v4z6q\") pod \"ceilometer-0\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.386489 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.510342 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.583529 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4518e9df-bc84-45c9-ae55-3af2cff56a19-utilities\") pod \"4518e9df-bc84-45c9-ae55-3af2cff56a19\" (UID: \"4518e9df-bc84-45c9-ae55-3af2cff56a19\") " Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.583605 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4518e9df-bc84-45c9-ae55-3af2cff56a19-catalog-content\") pod \"4518e9df-bc84-45c9-ae55-3af2cff56a19\" (UID: \"4518e9df-bc84-45c9-ae55-3af2cff56a19\") " Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.583656 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk6xf\" (UniqueName: \"kubernetes.io/projected/4518e9df-bc84-45c9-ae55-3af2cff56a19-kube-api-access-tk6xf\") pod \"4518e9df-bc84-45c9-ae55-3af2cff56a19\" (UID: \"4518e9df-bc84-45c9-ae55-3af2cff56a19\") " Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.585392 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4518e9df-bc84-45c9-ae55-3af2cff56a19-utilities" (OuterVolumeSpecName: "utilities") pod "4518e9df-bc84-45c9-ae55-3af2cff56a19" (UID: "4518e9df-bc84-45c9-ae55-3af2cff56a19"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.588422 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4518e9df-bc84-45c9-ae55-3af2cff56a19-kube-api-access-tk6xf" (OuterVolumeSpecName: "kube-api-access-tk6xf") pod "4518e9df-bc84-45c9-ae55-3af2cff56a19" (UID: "4518e9df-bc84-45c9-ae55-3af2cff56a19"). InnerVolumeSpecName "kube-api-access-tk6xf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.658757 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4518e9df-bc84-45c9-ae55-3af2cff56a19-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4518e9df-bc84-45c9-ae55-3af2cff56a19" (UID: "4518e9df-bc84-45c9-ae55-3af2cff56a19"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.687499 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4518e9df-bc84-45c9-ae55-3af2cff56a19-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.687546 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4518e9df-bc84-45c9-ae55-3af2cff56a19-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.687562 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk6xf\" (UniqueName: \"kubernetes.io/projected/4518e9df-bc84-45c9-ae55-3af2cff56a19-kube-api-access-tk6xf\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:52 crc kubenswrapper[4884]: I1210 00:56:52.873540 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:56:52 crc kubenswrapper[4884]: W1210 00:56:52.877891 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49b7e398_14c9_45e9_abdb_31c0431dee66.slice/crio-87290e99cd9971d0b29d3da7ae91d7cc070c4c8e416afeca78dd7e2ce4ce8629 WatchSource:0}: Error finding container 87290e99cd9971d0b29d3da7ae91d7cc070c4c8e416afeca78dd7e2ce4ce8629: Status 404 returned error can't find the container with id 87290e99cd9971d0b29d3da7ae91d7cc070c4c8e416afeca78dd7e2ce4ce8629 Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.000260 4884 generic.go:334] "Generic (PLEG): container finished" podID="4518e9df-bc84-45c9-ae55-3af2cff56a19" containerID="2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008" exitCode=0 Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.000330 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fzs89" event={"ID":"4518e9df-bc84-45c9-ae55-3af2cff56a19","Type":"ContainerDied","Data":"2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008"} Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.000391 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fzs89" event={"ID":"4518e9df-bc84-45c9-ae55-3af2cff56a19","Type":"ContainerDied","Data":"a64e5122aa68fa67273d6c9d69df22714c7b7edca57e53eac8e31f82ee3afaa2"} Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.000398 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fzs89" Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.000412 4884 scope.go:117] "RemoveContainer" containerID="2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008" Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.007692 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49b7e398-14c9-45e9-abdb-31c0431dee66","Type":"ContainerStarted","Data":"87290e99cd9971d0b29d3da7ae91d7cc070c4c8e416afeca78dd7e2ce4ce8629"} Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.022039 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.030311 4884 scope.go:117] "RemoveContainer" containerID="56a3b8fbd64acee14002b0b04e72a9b51f8785a8dc7a25b1d052ab6da0edb1d5" Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.043485 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fzs89"] Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.056094 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fzs89"] Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.057686 4884 scope.go:117] "RemoveContainer" containerID="56f9b6221008ae374ce3b91142a02d2c5745a3cc0899006e0615c3b8c1037b1c" Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.079968 4884 scope.go:117] "RemoveContainer" containerID="2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008" Dec 10 00:56:53 crc kubenswrapper[4884]: E1210 00:56:53.083627 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008\": container with ID starting with 2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008 not found: ID does not exist" containerID="2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008" Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.083673 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008"} err="failed to get container status \"2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008\": rpc error: code = NotFound desc = could not find container \"2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008\": container with ID starting with 2c2380f8f9aaa434e80d6f1bc911252731a0a67efe0bd24bdec919b549b84008 not found: ID does not exist" Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.083702 4884 scope.go:117] "RemoveContainer" containerID="56a3b8fbd64acee14002b0b04e72a9b51f8785a8dc7a25b1d052ab6da0edb1d5" Dec 10 00:56:53 crc kubenswrapper[4884]: E1210 00:56:53.084344 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56a3b8fbd64acee14002b0b04e72a9b51f8785a8dc7a25b1d052ab6da0edb1d5\": container with ID starting with 56a3b8fbd64acee14002b0b04e72a9b51f8785a8dc7a25b1d052ab6da0edb1d5 not found: ID does not exist" containerID="56a3b8fbd64acee14002b0b04e72a9b51f8785a8dc7a25b1d052ab6da0edb1d5" Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.084382 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56a3b8fbd64acee14002b0b04e72a9b51f8785a8dc7a25b1d052ab6da0edb1d5"} err="failed to get container 
status \"56a3b8fbd64acee14002b0b04e72a9b51f8785a8dc7a25b1d052ab6da0edb1d5\": rpc error: code = NotFound desc = could not find container \"56a3b8fbd64acee14002b0b04e72a9b51f8785a8dc7a25b1d052ab6da0edb1d5\": container with ID starting with 56a3b8fbd64acee14002b0b04e72a9b51f8785a8dc7a25b1d052ab6da0edb1d5 not found: ID does not exist" Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.084396 4884 scope.go:117] "RemoveContainer" containerID="56f9b6221008ae374ce3b91142a02d2c5745a3cc0899006e0615c3b8c1037b1c" Dec 10 00:56:53 crc kubenswrapper[4884]: E1210 00:56:53.084638 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56f9b6221008ae374ce3b91142a02d2c5745a3cc0899006e0615c3b8c1037b1c\": container with ID starting with 56f9b6221008ae374ce3b91142a02d2c5745a3cc0899006e0615c3b8c1037b1c not found: ID does not exist" containerID="56f9b6221008ae374ce3b91142a02d2c5745a3cc0899006e0615c3b8c1037b1c" Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.084656 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56f9b6221008ae374ce3b91142a02d2c5745a3cc0899006e0615c3b8c1037b1c"} err="failed to get container status \"56f9b6221008ae374ce3b91142a02d2c5745a3cc0899006e0615c3b8c1037b1c\": rpc error: code = NotFound desc = could not find container \"56f9b6221008ae374ce3b91142a02d2c5745a3cc0899006e0615c3b8c1037b1c\": container with ID starting with 56f9b6221008ae374ce3b91142a02d2c5745a3cc0899006e0615c3b8c1037b1c not found: ID does not exist" Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.313532 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4518e9df-bc84-45c9-ae55-3af2cff56a19" path="/var/lib/kubelet/pods/4518e9df-bc84-45c9-ae55-3af2cff56a19/volumes" Dec 10 00:56:53 crc kubenswrapper[4884]: I1210 00:56:53.315338 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffc65c2a-883e-4939-8685-a0d77fd1c717" path="/var/lib/kubelet/pods/ffc65c2a-883e-4939-8685-a0d77fd1c717/volumes" Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.018189 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49b7e398-14c9-45e9-abdb-31c0431dee66","Type":"ContainerStarted","Data":"bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688"} Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.021861 4884 generic.go:334] "Generic (PLEG): container finished" podID="9eff8542-e352-4f68-bb2b-aa02df1e06f8" containerID="77f7d30e110d99a17d1e200c8fcc890cd15ec0c5fe48e0b7a6db365309eae252" exitCode=0 Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.021913 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-58c87ffb89-4m2v2" event={"ID":"9eff8542-e352-4f68-bb2b-aa02df1e06f8","Type":"ContainerDied","Data":"77f7d30e110d99a17d1e200c8fcc890cd15ec0c5fe48e0b7a6db365309eae252"} Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.471866 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.528209 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-config-data-custom\") pod \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.528298 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-config-data\") pod \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.528500 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-combined-ca-bundle\") pod \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.528542 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2gd6c\" (UniqueName: \"kubernetes.io/projected/9eff8542-e352-4f68-bb2b-aa02df1e06f8-kube-api-access-2gd6c\") pod \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\" (UID: \"9eff8542-e352-4f68-bb2b-aa02df1e06f8\") " Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.539368 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9eff8542-e352-4f68-bb2b-aa02df1e06f8-kube-api-access-2gd6c" (OuterVolumeSpecName: "kube-api-access-2gd6c") pod "9eff8542-e352-4f68-bb2b-aa02df1e06f8" (UID: "9eff8542-e352-4f68-bb2b-aa02df1e06f8"). InnerVolumeSpecName "kube-api-access-2gd6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.540603 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9eff8542-e352-4f68-bb2b-aa02df1e06f8" (UID: "9eff8542-e352-4f68-bb2b-aa02df1e06f8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.577015 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9eff8542-e352-4f68-bb2b-aa02df1e06f8" (UID: "9eff8542-e352-4f68-bb2b-aa02df1e06f8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.598855 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-config-data" (OuterVolumeSpecName: "config-data") pod "9eff8542-e352-4f68-bb2b-aa02df1e06f8" (UID: "9eff8542-e352-4f68-bb2b-aa02df1e06f8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.630682 4884 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.630714 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.630723 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9eff8542-e352-4f68-bb2b-aa02df1e06f8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:54 crc kubenswrapper[4884]: I1210 00:56:54.630732 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2gd6c\" (UniqueName: \"kubernetes.io/projected/9eff8542-e352-4f68-bb2b-aa02df1e06f8-kube-api-access-2gd6c\") on node \"crc\" DevicePath \"\"" Dec 10 00:56:55 crc kubenswrapper[4884]: I1210 00:56:55.043078 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-58c87ffb89-4m2v2" event={"ID":"9eff8542-e352-4f68-bb2b-aa02df1e06f8","Type":"ContainerDied","Data":"a161d9c7379232224139d9ad222216f92ed54460dd8444af97e828a61b521372"} Dec 10 00:56:55 crc kubenswrapper[4884]: I1210 00:56:55.043139 4884 scope.go:117] "RemoveContainer" containerID="77f7d30e110d99a17d1e200c8fcc890cd15ec0c5fe48e0b7a6db365309eae252" Dec 10 00:56:55 crc kubenswrapper[4884]: I1210 00:56:55.043290 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-58c87ffb89-4m2v2" Dec 10 00:56:55 crc kubenswrapper[4884]: I1210 00:56:55.055741 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49b7e398-14c9-45e9-abdb-31c0431dee66","Type":"ContainerStarted","Data":"5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca"} Dec 10 00:56:55 crc kubenswrapper[4884]: I1210 00:56:55.089334 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-58c87ffb89-4m2v2"] Dec 10 00:56:55 crc kubenswrapper[4884]: I1210 00:56:55.100673 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-58c87ffb89-4m2v2"] Dec 10 00:56:55 crc kubenswrapper[4884]: I1210 00:56:55.300496 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9eff8542-e352-4f68-bb2b-aa02df1e06f8" path="/var/lib/kubelet/pods/9eff8542-e352-4f68-bb2b-aa02df1e06f8/volumes" Dec 10 00:56:56 crc kubenswrapper[4884]: I1210 00:56:56.068131 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49b7e398-14c9-45e9-abdb-31c0431dee66","Type":"ContainerStarted","Data":"87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077"} Dec 10 00:56:57 crc kubenswrapper[4884]: I1210 00:56:57.080042 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49b7e398-14c9-45e9-abdb-31c0431dee66","Type":"ContainerStarted","Data":"e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362"} Dec 10 00:56:57 crc kubenswrapper[4884]: I1210 00:56:57.080362 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="ceilometer-central-agent" 
containerID="cri-o://bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688" gracePeriod=30 Dec 10 00:56:57 crc kubenswrapper[4884]: I1210 00:56:57.080402 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 00:56:57 crc kubenswrapper[4884]: I1210 00:56:57.080414 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="sg-core" containerID="cri-o://87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077" gracePeriod=30 Dec 10 00:56:57 crc kubenswrapper[4884]: I1210 00:56:57.080412 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="proxy-httpd" containerID="cri-o://e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362" gracePeriod=30 Dec 10 00:56:57 crc kubenswrapper[4884]: I1210 00:56:57.080475 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="ceilometer-notification-agent" containerID="cri-o://5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca" gracePeriod=30 Dec 10 00:56:57 crc kubenswrapper[4884]: I1210 00:56:57.104916 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.400725529 podStartE2EDuration="5.104900521s" podCreationTimestamp="2025-12-10 00:56:52 +0000 UTC" firstStartedPulling="2025-12-10 00:56:52.880574262 +0000 UTC m=+1585.958531379" lastFinishedPulling="2025-12-10 00:56:56.584749244 +0000 UTC m=+1589.662706371" observedRunningTime="2025-12-10 00:56:57.10228251 +0000 UTC m=+1590.180239647" watchObservedRunningTime="2025-12-10 00:56:57.104900521 +0000 UTC m=+1590.182857638" Dec 10 00:56:58 crc kubenswrapper[4884]: I1210 00:56:58.091336 4884 generic.go:334] "Generic (PLEG): container finished" podID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerID="e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362" exitCode=0 Dec 10 00:56:58 crc kubenswrapper[4884]: I1210 00:56:58.091604 4884 generic.go:334] "Generic (PLEG): container finished" podID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerID="87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077" exitCode=2 Dec 10 00:56:58 crc kubenswrapper[4884]: I1210 00:56:58.091615 4884 generic.go:334] "Generic (PLEG): container finished" podID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerID="5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca" exitCode=0 Dec 10 00:56:58 crc kubenswrapper[4884]: I1210 00:56:58.091500 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49b7e398-14c9-45e9-abdb-31c0431dee66","Type":"ContainerDied","Data":"e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362"} Dec 10 00:56:58 crc kubenswrapper[4884]: I1210 00:56:58.091648 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49b7e398-14c9-45e9-abdb-31c0431dee66","Type":"ContainerDied","Data":"87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077"} Dec 10 00:56:58 crc kubenswrapper[4884]: I1210 00:56:58.091663 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"49b7e398-14c9-45e9-abdb-31c0431dee66","Type":"ContainerDied","Data":"5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca"} Dec 10 00:57:01 crc kubenswrapper[4884]: I1210 00:57:01.287458 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:57:01 crc kubenswrapper[4884]: E1210 00:57:01.288221 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.157281 4884 generic.go:334] "Generic (PLEG): container finished" podID="5950ff0d-ec3f-4bb0-8cdf-b2a536c71670" containerID="5cda245efda47526d292cbb1c535c23938a0cab0db243ee95d554069dae871bf" exitCode=0 Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.157374 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-f9dh7" event={"ID":"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670","Type":"ContainerDied","Data":"5cda245efda47526d292cbb1c535c23938a0cab0db243ee95d554069dae871bf"} Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.630315 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.733293 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4z6q\" (UniqueName: \"kubernetes.io/projected/49b7e398-14c9-45e9-abdb-31c0431dee66-kube-api-access-v4z6q\") pod \"49b7e398-14c9-45e9-abdb-31c0431dee66\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.733371 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49b7e398-14c9-45e9-abdb-31c0431dee66-log-httpd\") pod \"49b7e398-14c9-45e9-abdb-31c0431dee66\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.733424 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49b7e398-14c9-45e9-abdb-31c0431dee66-run-httpd\") pod \"49b7e398-14c9-45e9-abdb-31c0431dee66\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.733491 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-sg-core-conf-yaml\") pod \"49b7e398-14c9-45e9-abdb-31c0431dee66\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.733535 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-config-data\") pod \"49b7e398-14c9-45e9-abdb-31c0431dee66\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.733570 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-scripts\") pod \"49b7e398-14c9-45e9-abdb-31c0431dee66\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.733595 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-combined-ca-bundle\") pod \"49b7e398-14c9-45e9-abdb-31c0431dee66\" (UID: \"49b7e398-14c9-45e9-abdb-31c0431dee66\") " Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.736741 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49b7e398-14c9-45e9-abdb-31c0431dee66-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "49b7e398-14c9-45e9-abdb-31c0431dee66" (UID: "49b7e398-14c9-45e9-abdb-31c0431dee66"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.737298 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/49b7e398-14c9-45e9-abdb-31c0431dee66-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "49b7e398-14c9-45e9-abdb-31c0431dee66" (UID: "49b7e398-14c9-45e9-abdb-31c0431dee66"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.739506 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-scripts" (OuterVolumeSpecName: "scripts") pod "49b7e398-14c9-45e9-abdb-31c0431dee66" (UID: "49b7e398-14c9-45e9-abdb-31c0431dee66"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.741643 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49b7e398-14c9-45e9-abdb-31c0431dee66-kube-api-access-v4z6q" (OuterVolumeSpecName: "kube-api-access-v4z6q") pod "49b7e398-14c9-45e9-abdb-31c0431dee66" (UID: "49b7e398-14c9-45e9-abdb-31c0431dee66"). InnerVolumeSpecName "kube-api-access-v4z6q". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.764680 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "49b7e398-14c9-45e9-abdb-31c0431dee66" (UID: "49b7e398-14c9-45e9-abdb-31c0431dee66"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.815775 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "49b7e398-14c9-45e9-abdb-31c0431dee66" (UID: "49b7e398-14c9-45e9-abdb-31c0431dee66"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.836566 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4z6q\" (UniqueName: \"kubernetes.io/projected/49b7e398-14c9-45e9-abdb-31c0431dee66-kube-api-access-v4z6q\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.836598 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49b7e398-14c9-45e9-abdb-31c0431dee66-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.836607 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/49b7e398-14c9-45e9-abdb-31c0431dee66-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.836617 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.836625 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.836634 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.840669 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-config-data" (OuterVolumeSpecName: "config-data") pod "49b7e398-14c9-45e9-abdb-31c0431dee66" (UID: "49b7e398-14c9-45e9-abdb-31c0431dee66"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:03 crc kubenswrapper[4884]: I1210 00:57:03.938843 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49b7e398-14c9-45e9-abdb-31c0431dee66-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.174593 4884 generic.go:334] "Generic (PLEG): container finished" podID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerID="bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688" exitCode=0 Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.174785 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.174869 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49b7e398-14c9-45e9-abdb-31c0431dee66","Type":"ContainerDied","Data":"bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688"} Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.174916 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"49b7e398-14c9-45e9-abdb-31c0431dee66","Type":"ContainerDied","Data":"87290e99cd9971d0b29d3da7ae91d7cc070c4c8e416afeca78dd7e2ce4ce8629"} Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.174945 4884 scope.go:117] "RemoveContainer" containerID="e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.208778 4884 scope.go:117] "RemoveContainer" containerID="87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.278058 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.287627 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.315624 4884 scope.go:117] "RemoveContainer" containerID="5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.346295 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:04 crc kubenswrapper[4884]: E1210 00:57:04.347031 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4518e9df-bc84-45c9-ae55-3af2cff56a19" containerName="extract-utilities" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.347051 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4518e9df-bc84-45c9-ae55-3af2cff56a19" containerName="extract-utilities" Dec 10 00:57:04 crc kubenswrapper[4884]: E1210 00:57:04.347071 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4518e9df-bc84-45c9-ae55-3af2cff56a19" containerName="registry-server" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.347080 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4518e9df-bc84-45c9-ae55-3af2cff56a19" containerName="registry-server" Dec 10 00:57:04 crc kubenswrapper[4884]: E1210 00:57:04.347127 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4518e9df-bc84-45c9-ae55-3af2cff56a19" containerName="extract-content" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.347136 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="4518e9df-bc84-45c9-ae55-3af2cff56a19" containerName="extract-content" Dec 10 00:57:04 crc kubenswrapper[4884]: E1210 00:57:04.347146 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="ceilometer-notification-agent" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.347154 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="ceilometer-notification-agent" Dec 10 00:57:04 crc kubenswrapper[4884]: E1210 00:57:04.347199 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9eff8542-e352-4f68-bb2b-aa02df1e06f8" containerName="heat-engine" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.347208 4884 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="9eff8542-e352-4f68-bb2b-aa02df1e06f8" containerName="heat-engine" Dec 10 00:57:04 crc kubenswrapper[4884]: E1210 00:57:04.347219 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="ceilometer-central-agent" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.347227 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="ceilometer-central-agent" Dec 10 00:57:04 crc kubenswrapper[4884]: E1210 00:57:04.347242 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="proxy-httpd" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.347270 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="proxy-httpd" Dec 10 00:57:04 crc kubenswrapper[4884]: E1210 00:57:04.347285 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="sg-core" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.347292 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="sg-core" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.351561 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="sg-core" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.351649 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="proxy-httpd" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.351667 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="ceilometer-central-agent" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.351678 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="9eff8542-e352-4f68-bb2b-aa02df1e06f8" containerName="heat-engine" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.351692 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="4518e9df-bc84-45c9-ae55-3af2cff56a19" containerName="registry-server" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.351733 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" containerName="ceilometer-notification-agent" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.354895 4884 scope.go:117] "RemoveContainer" containerID="bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.355150 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.370355 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.371695 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.371907 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.440838 4884 scope.go:117] "RemoveContainer" containerID="e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362" Dec 10 00:57:04 crc kubenswrapper[4884]: E1210 00:57:04.441272 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362\": container with ID starting with e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362 not found: ID does not exist" containerID="e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.441302 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362"} err="failed to get container status \"e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362\": rpc error: code = NotFound desc = could not find container \"e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362\": container with ID starting with e2429b1c932e72b61cafd2d08c0d93f25ea4bbafb90ac6a8cf36cf39f485d362 not found: ID does not exist" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.441323 4884 scope.go:117] "RemoveContainer" containerID="87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077" Dec 10 00:57:04 crc kubenswrapper[4884]: E1210 00:57:04.441648 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077\": container with ID starting with 87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077 not found: ID does not exist" containerID="87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.441668 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077"} err="failed to get container status \"87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077\": rpc error: code = NotFound desc = could not find container \"87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077\": container with ID starting with 87134e90c07eff36536cef515a750ec7ce9aab9def653a09f44622ce4159d077 not found: ID does not exist" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.441680 4884 scope.go:117] "RemoveContainer" containerID="5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca" Dec 10 00:57:04 crc kubenswrapper[4884]: E1210 00:57:04.441847 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca\": container with ID starting with 5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca not found: ID 
does not exist" containerID="5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.441865 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca"} err="failed to get container status \"5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca\": rpc error: code = NotFound desc = could not find container \"5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca\": container with ID starting with 5d3d5cb424252521e8120f56f8469187d40f76ddb9f89adfce27e54924a660ca not found: ID does not exist" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.441885 4884 scope.go:117] "RemoveContainer" containerID="bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688" Dec 10 00:57:04 crc kubenswrapper[4884]: E1210 00:57:04.442045 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688\": container with ID starting with bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688 not found: ID does not exist" containerID="bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.442064 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688"} err="failed to get container status \"bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688\": rpc error: code = NotFound desc = could not find container \"bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688\": container with ID starting with bd04ba7401acc4b3a3cbebeb37571249415de684b8dcc09aa6e90a1804b8b688 not found: ID does not exist" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.447637 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.447717 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8a8c712-66f2-48ad-857f-8ed05ef28076-run-httpd\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.447769 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-scripts\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.447804 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.447822 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-config-data\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.447853 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvk6w\" (UniqueName: \"kubernetes.io/projected/a8a8c712-66f2-48ad-857f-8ed05ef28076-kube-api-access-lvk6w\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.447923 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8a8c712-66f2-48ad-857f-8ed05ef28076-log-httpd\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.549757 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-scripts\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.549819 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.549840 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-config-data\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.549872 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvk6w\" (UniqueName: \"kubernetes.io/projected/a8a8c712-66f2-48ad-857f-8ed05ef28076-kube-api-access-lvk6w\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.549931 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8a8c712-66f2-48ad-857f-8ed05ef28076-log-httpd\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.550000 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.550022 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8a8c712-66f2-48ad-857f-8ed05ef28076-run-httpd\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.550560 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8a8c712-66f2-48ad-857f-8ed05ef28076-run-httpd\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.550660 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8a8c712-66f2-48ad-857f-8ed05ef28076-log-httpd\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.556760 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.557863 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.559262 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-config-data\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.559971 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-scripts\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.568259 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvk6w\" (UniqueName: \"kubernetes.io/projected/a8a8c712-66f2-48ad-857f-8ed05ef28076-kube-api-access-lvk6w\") pod \"ceilometer-0\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.647125 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.696509 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.752936 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-combined-ca-bundle\") pod \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.753117 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-config-data\") pod \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.753262 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-scripts\") pod \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.753330 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvzpr\" (UniqueName: \"kubernetes.io/projected/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-kube-api-access-jvzpr\") pod \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\" (UID: \"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670\") " Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.757371 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-kube-api-access-jvzpr" (OuterVolumeSpecName: "kube-api-access-jvzpr") pod "5950ff0d-ec3f-4bb0-8cdf-b2a536c71670" (UID: "5950ff0d-ec3f-4bb0-8cdf-b2a536c71670"). InnerVolumeSpecName "kube-api-access-jvzpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.758241 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-scripts" (OuterVolumeSpecName: "scripts") pod "5950ff0d-ec3f-4bb0-8cdf-b2a536c71670" (UID: "5950ff0d-ec3f-4bb0-8cdf-b2a536c71670"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.785735 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-config-data" (OuterVolumeSpecName: "config-data") pod "5950ff0d-ec3f-4bb0-8cdf-b2a536c71670" (UID: "5950ff0d-ec3f-4bb0-8cdf-b2a536c71670"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.790922 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5950ff0d-ec3f-4bb0-8cdf-b2a536c71670" (UID: "5950ff0d-ec3f-4bb0-8cdf-b2a536c71670"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.855446 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.855800 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.855811 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:04 crc kubenswrapper[4884]: I1210 00:57:04.855820 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvzpr\" (UniqueName: \"kubernetes.io/projected/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670-kube-api-access-jvzpr\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.164098 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.202564 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-f9dh7" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.202492 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-f9dh7" event={"ID":"5950ff0d-ec3f-4bb0-8cdf-b2a536c71670","Type":"ContainerDied","Data":"31d275fe4e436dcd73ea923d457d6c1fb1e78e6fc3caca1a35bafde7311c943a"} Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.217611 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31d275fe4e436dcd73ea923d457d6c1fb1e78e6fc3caca1a35bafde7311c943a" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.305395 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49b7e398-14c9-45e9-abdb-31c0431dee66" path="/var/lib/kubelet/pods/49b7e398-14c9-45e9-abdb-31c0431dee66/volumes" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.306620 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 00:57:05 crc kubenswrapper[4884]: E1210 00:57:05.306961 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5950ff0d-ec3f-4bb0-8cdf-b2a536c71670" containerName="nova-cell0-conductor-db-sync" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.306978 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5950ff0d-ec3f-4bb0-8cdf-b2a536c71670" containerName="nova-cell0-conductor-db-sync" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.307168 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5950ff0d-ec3f-4bb0-8cdf-b2a536c71670" containerName="nova-cell0-conductor-db-sync" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.307843 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.310177 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.310206 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.313676 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mdmcl" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.365759 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4g8xl\" (UniqueName: \"kubernetes.io/projected/23ada451-7f1d-4445-b4e9-74fed45cdbcd-kube-api-access-4g8xl\") pod \"nova-cell0-conductor-0\" (UID: \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\") " pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.366018 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23ada451-7f1d-4445-b4e9-74fed45cdbcd-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\") " pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.366165 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23ada451-7f1d-4445-b4e9-74fed45cdbcd-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\") " pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.467898 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4g8xl\" (UniqueName: \"kubernetes.io/projected/23ada451-7f1d-4445-b4e9-74fed45cdbcd-kube-api-access-4g8xl\") pod \"nova-cell0-conductor-0\" (UID: \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\") " pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.467937 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23ada451-7f1d-4445-b4e9-74fed45cdbcd-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\") " pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.467974 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23ada451-7f1d-4445-b4e9-74fed45cdbcd-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\") " pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.473412 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23ada451-7f1d-4445-b4e9-74fed45cdbcd-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\") " pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.473938 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23ada451-7f1d-4445-b4e9-74fed45cdbcd-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\") " pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.483994 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4g8xl\" (UniqueName: \"kubernetes.io/projected/23ada451-7f1d-4445-b4e9-74fed45cdbcd-kube-api-access-4g8xl\") pod \"nova-cell0-conductor-0\" (UID: \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\") " pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:05 crc kubenswrapper[4884]: I1210 00:57:05.631285 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:06 crc kubenswrapper[4884]: I1210 00:57:06.168315 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 00:57:06 crc kubenswrapper[4884]: W1210 00:57:06.169552 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod23ada451_7f1d_4445_b4e9_74fed45cdbcd.slice/crio-c5a1637a269188a480cd2c27bc47fba83aa67aee93d8026c57a89e40126bdf8a WatchSource:0}: Error finding container c5a1637a269188a480cd2c27bc47fba83aa67aee93d8026c57a89e40126bdf8a: Status 404 returned error can't find the container with id c5a1637a269188a480cd2c27bc47fba83aa67aee93d8026c57a89e40126bdf8a Dec 10 00:57:06 crc kubenswrapper[4884]: I1210 00:57:06.217318 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"23ada451-7f1d-4445-b4e9-74fed45cdbcd","Type":"ContainerStarted","Data":"c5a1637a269188a480cd2c27bc47fba83aa67aee93d8026c57a89e40126bdf8a"} Dec 10 00:57:06 crc kubenswrapper[4884]: I1210 00:57:06.219395 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8a8c712-66f2-48ad-857f-8ed05ef28076","Type":"ContainerStarted","Data":"53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033"} Dec 10 00:57:06 crc kubenswrapper[4884]: I1210 00:57:06.219422 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8a8c712-66f2-48ad-857f-8ed05ef28076","Type":"ContainerStarted","Data":"87aa17f8f1f8f683421f02937e7697664794e394bbcaf9d5efa375965bc8eaad"} Dec 10 00:57:07 crc kubenswrapper[4884]: I1210 00:57:07.227931 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"23ada451-7f1d-4445-b4e9-74fed45cdbcd","Type":"ContainerStarted","Data":"1d728d06f688764094d342955ab02fc1ff9a6ecb19dc3a46e66ba932b00dc6fd"} Dec 10 00:57:07 crc kubenswrapper[4884]: I1210 00:57:07.228377 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:07 crc kubenswrapper[4884]: I1210 00:57:07.230506 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8a8c712-66f2-48ad-857f-8ed05ef28076","Type":"ContainerStarted","Data":"be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38"} Dec 10 00:57:07 crc kubenswrapper[4884]: I1210 00:57:07.256351 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.25632517 podStartE2EDuration="2.25632517s" podCreationTimestamp="2025-12-10 00:57:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:57:07.244002845 +0000 UTC m=+1600.321959962" watchObservedRunningTime="2025-12-10 
00:57:07.25632517 +0000 UTC m=+1600.334282287" Dec 10 00:57:08 crc kubenswrapper[4884]: I1210 00:57:08.767246 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:10 crc kubenswrapper[4884]: I1210 00:57:10.279558 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8a8c712-66f2-48ad-857f-8ed05ef28076","Type":"ContainerStarted","Data":"c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d"} Dec 10 00:57:11 crc kubenswrapper[4884]: I1210 00:57:11.293115 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="ceilometer-central-agent" containerID="cri-o://53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033" gracePeriod=30 Dec 10 00:57:11 crc kubenswrapper[4884]: I1210 00:57:11.293221 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="ceilometer-notification-agent" containerID="cri-o://be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38" gracePeriod=30 Dec 10 00:57:11 crc kubenswrapper[4884]: I1210 00:57:11.293477 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="sg-core" containerID="cri-o://c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d" gracePeriod=30 Dec 10 00:57:11 crc kubenswrapper[4884]: I1210 00:57:11.293238 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="proxy-httpd" containerID="cri-o://f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b" gracePeriod=30 Dec 10 00:57:11 crc kubenswrapper[4884]: I1210 00:57:11.310865 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8a8c712-66f2-48ad-857f-8ed05ef28076","Type":"ContainerStarted","Data":"f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b"} Dec 10 00:57:11 crc kubenswrapper[4884]: I1210 00:57:11.311058 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 00:57:11 crc kubenswrapper[4884]: I1210 00:57:11.321017 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.082985355 podStartE2EDuration="7.320976614s" podCreationTimestamp="2025-12-10 00:57:04 +0000 UTC" firstStartedPulling="2025-12-10 00:57:05.191042939 +0000 UTC m=+1598.269000056" lastFinishedPulling="2025-12-10 00:57:10.429034198 +0000 UTC m=+1603.506991315" observedRunningTime="2025-12-10 00:57:11.318515017 +0000 UTC m=+1604.396472154" watchObservedRunningTime="2025-12-10 00:57:11.320976614 +0000 UTC m=+1604.398933751" Dec 10 00:57:11 crc kubenswrapper[4884]: I1210 00:57:11.432068 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 00:57:11 crc kubenswrapper[4884]: I1210 00:57:11.432252 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="23ada451-7f1d-4445-b4e9-74fed45cdbcd" containerName="nova-cell0-conductor-conductor" containerID="cri-o://1d728d06f688764094d342955ab02fc1ff9a6ecb19dc3a46e66ba932b00dc6fd" gracePeriod=30 Dec 10 00:57:11 crc kubenswrapper[4884]: E1210 
00:57:11.434538 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1d728d06f688764094d342955ab02fc1ff9a6ecb19dc3a46e66ba932b00dc6fd" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 10 00:57:11 crc kubenswrapper[4884]: E1210 00:57:11.439547 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1d728d06f688764094d342955ab02fc1ff9a6ecb19dc3a46e66ba932b00dc6fd" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 10 00:57:11 crc kubenswrapper[4884]: E1210 00:57:11.441946 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1d728d06f688764094d342955ab02fc1ff9a6ecb19dc3a46e66ba932b00dc6fd" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 10 00:57:11 crc kubenswrapper[4884]: E1210 00:57:11.442061 4884 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="23ada451-7f1d-4445-b4e9-74fed45cdbcd" containerName="nova-cell0-conductor-conductor" Dec 10 00:57:12 crc kubenswrapper[4884]: I1210 00:57:12.303555 4884 generic.go:334] "Generic (PLEG): container finished" podID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerID="f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b" exitCode=0 Dec 10 00:57:12 crc kubenswrapper[4884]: I1210 00:57:12.303810 4884 generic.go:334] "Generic (PLEG): container finished" podID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerID="c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d" exitCode=2 Dec 10 00:57:12 crc kubenswrapper[4884]: I1210 00:57:12.303821 4884 generic.go:334] "Generic (PLEG): container finished" podID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerID="be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38" exitCode=0 Dec 10 00:57:12 crc kubenswrapper[4884]: I1210 00:57:12.303711 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8a8c712-66f2-48ad-857f-8ed05ef28076","Type":"ContainerDied","Data":"f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b"} Dec 10 00:57:12 crc kubenswrapper[4884]: I1210 00:57:12.303854 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8a8c712-66f2-48ad-857f-8ed05ef28076","Type":"ContainerDied","Data":"c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d"} Dec 10 00:57:12 crc kubenswrapper[4884]: I1210 00:57:12.303867 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8a8c712-66f2-48ad-857f-8ed05ef28076","Type":"ContainerDied","Data":"be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38"} Dec 10 00:57:13 crc kubenswrapper[4884]: I1210 00:57:13.287457 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:57:13 crc kubenswrapper[4884]: E1210 00:57:13.287672 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:57:14 crc kubenswrapper[4884]: I1210 00:57:14.322795 4884 generic.go:334] "Generic (PLEG): container finished" podID="23ada451-7f1d-4445-b4e9-74fed45cdbcd" containerID="1d728d06f688764094d342955ab02fc1ff9a6ecb19dc3a46e66ba932b00dc6fd" exitCode=0 Dec 10 00:57:14 crc kubenswrapper[4884]: I1210 00:57:14.322838 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"23ada451-7f1d-4445-b4e9-74fed45cdbcd","Type":"ContainerDied","Data":"1d728d06f688764094d342955ab02fc1ff9a6ecb19dc3a46e66ba932b00dc6fd"} Dec 10 00:57:14 crc kubenswrapper[4884]: I1210 00:57:14.996963 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.072185 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23ada451-7f1d-4445-b4e9-74fed45cdbcd-config-data\") pod \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\" (UID: \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\") " Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.072374 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23ada451-7f1d-4445-b4e9-74fed45cdbcd-combined-ca-bundle\") pod \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\" (UID: \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\") " Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.072478 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4g8xl\" (UniqueName: \"kubernetes.io/projected/23ada451-7f1d-4445-b4e9-74fed45cdbcd-kube-api-access-4g8xl\") pod \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\" (UID: \"23ada451-7f1d-4445-b4e9-74fed45cdbcd\") " Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.078920 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23ada451-7f1d-4445-b4e9-74fed45cdbcd-kube-api-access-4g8xl" (OuterVolumeSpecName: "kube-api-access-4g8xl") pod "23ada451-7f1d-4445-b4e9-74fed45cdbcd" (UID: "23ada451-7f1d-4445-b4e9-74fed45cdbcd"). InnerVolumeSpecName "kube-api-access-4g8xl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.129727 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23ada451-7f1d-4445-b4e9-74fed45cdbcd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "23ada451-7f1d-4445-b4e9-74fed45cdbcd" (UID: "23ada451-7f1d-4445-b4e9-74fed45cdbcd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.139185 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23ada451-7f1d-4445-b4e9-74fed45cdbcd-config-data" (OuterVolumeSpecName: "config-data") pod "23ada451-7f1d-4445-b4e9-74fed45cdbcd" (UID: "23ada451-7f1d-4445-b4e9-74fed45cdbcd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.176227 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23ada451-7f1d-4445-b4e9-74fed45cdbcd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.176302 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4g8xl\" (UniqueName: \"kubernetes.io/projected/23ada451-7f1d-4445-b4e9-74fed45cdbcd-kube-api-access-4g8xl\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.177132 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23ada451-7f1d-4445-b4e9-74fed45cdbcd-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.248177 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.305473 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-24s97"] Dec 10 00:57:15 crc kubenswrapper[4884]: E1210 00:57:15.305870 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23ada451-7f1d-4445-b4e9-74fed45cdbcd" containerName="nova-cell0-conductor-conductor" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.305889 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="23ada451-7f1d-4445-b4e9-74fed45cdbcd" containerName="nova-cell0-conductor-conductor" Dec 10 00:57:15 crc kubenswrapper[4884]: E1210 00:57:15.305905 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="proxy-httpd" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.305913 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="proxy-httpd" Dec 10 00:57:15 crc kubenswrapper[4884]: E1210 00:57:15.305929 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="ceilometer-notification-agent" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.305935 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="ceilometer-notification-agent" Dec 10 00:57:15 crc kubenswrapper[4884]: E1210 00:57:15.305952 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="sg-core" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.305958 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="sg-core" Dec 10 00:57:15 crc kubenswrapper[4884]: E1210 00:57:15.305970 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="ceilometer-central-agent" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.305976 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="ceilometer-central-agent" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.306182 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="proxy-httpd" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.306197 4884 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="23ada451-7f1d-4445-b4e9-74fed45cdbcd" containerName="nova-cell0-conductor-conductor" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.306207 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="ceilometer-central-agent" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.306214 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="ceilometer-notification-agent" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.306232 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerName="sg-core" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.307653 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-24s97" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.319324 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-24s97"] Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.340492 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"23ada451-7f1d-4445-b4e9-74fed45cdbcd","Type":"ContainerDied","Data":"c5a1637a269188a480cd2c27bc47fba83aa67aee93d8026c57a89e40126bdf8a"} Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.340542 4884 scope.go:117] "RemoveContainer" containerID="1d728d06f688764094d342955ab02fc1ff9a6ecb19dc3a46e66ba932b00dc6fd" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.340647 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.348639 4884 generic.go:334] "Generic (PLEG): container finished" podID="a8a8c712-66f2-48ad-857f-8ed05ef28076" containerID="53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033" exitCode=0 Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.348675 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8a8c712-66f2-48ad-857f-8ed05ef28076","Type":"ContainerDied","Data":"53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033"} Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.348698 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a8a8c712-66f2-48ad-857f-8ed05ef28076","Type":"ContainerDied","Data":"87aa17f8f1f8f683421f02937e7697664794e394bbcaf9d5efa375965bc8eaad"} Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.348751 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.370677 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.382555 4884 scope.go:117] "RemoveContainer" containerID="f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.383119 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-scripts\") pod \"a8a8c712-66f2-48ad-857f-8ed05ef28076\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.383168 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvk6w\" (UniqueName: \"kubernetes.io/projected/a8a8c712-66f2-48ad-857f-8ed05ef28076-kube-api-access-lvk6w\") pod \"a8a8c712-66f2-48ad-857f-8ed05ef28076\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.383203 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-combined-ca-bundle\") pod \"a8a8c712-66f2-48ad-857f-8ed05ef28076\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.383234 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8a8c712-66f2-48ad-857f-8ed05ef28076-log-httpd\") pod \"a8a8c712-66f2-48ad-857f-8ed05ef28076\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.383315 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-sg-core-conf-yaml\") pod \"a8a8c712-66f2-48ad-857f-8ed05ef28076\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.383395 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-config-data\") pod \"a8a8c712-66f2-48ad-857f-8ed05ef28076\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.383467 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8a8c712-66f2-48ad-857f-8ed05ef28076-run-httpd\") pod \"a8a8c712-66f2-48ad-857f-8ed05ef28076\" (UID: \"a8a8c712-66f2-48ad-857f-8ed05ef28076\") " Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.383794 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbd36c7b-a8d4-4812-a133-4a82a466dbc7-utilities\") pod \"community-operators-24s97\" (UID: \"fbd36c7b-a8d4-4812-a133-4a82a466dbc7\") " pod="openshift-marketplace/community-operators-24s97" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.383945 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpdln\" (UniqueName: \"kubernetes.io/projected/fbd36c7b-a8d4-4812-a133-4a82a466dbc7-kube-api-access-mpdln\") pod 
\"community-operators-24s97\" (UID: \"fbd36c7b-a8d4-4812-a133-4a82a466dbc7\") " pod="openshift-marketplace/community-operators-24s97" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.383995 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbd36c7b-a8d4-4812-a133-4a82a466dbc7-catalog-content\") pod \"community-operators-24s97\" (UID: \"fbd36c7b-a8d4-4812-a133-4a82a466dbc7\") " pod="openshift-marketplace/community-operators-24s97" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.384217 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8a8c712-66f2-48ad-857f-8ed05ef28076-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a8a8c712-66f2-48ad-857f-8ed05ef28076" (UID: "a8a8c712-66f2-48ad-857f-8ed05ef28076"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.387230 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a8a8c712-66f2-48ad-857f-8ed05ef28076-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a8a8c712-66f2-48ad-857f-8ed05ef28076" (UID: "a8a8c712-66f2-48ad-857f-8ed05ef28076"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.390014 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-scripts" (OuterVolumeSpecName: "scripts") pod "a8a8c712-66f2-48ad-857f-8ed05ef28076" (UID: "a8a8c712-66f2-48ad-857f-8ed05ef28076"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.396756 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.397414 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8a8c712-66f2-48ad-857f-8ed05ef28076-kube-api-access-lvk6w" (OuterVolumeSpecName: "kube-api-access-lvk6w") pod "a8a8c712-66f2-48ad-857f-8ed05ef28076" (UID: "a8a8c712-66f2-48ad-857f-8ed05ef28076"). InnerVolumeSpecName "kube-api-access-lvk6w". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.416644 4884 scope.go:117] "RemoveContainer" containerID="c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.447531 4884 scope.go:117] "RemoveContainer" containerID="be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38" Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.450913 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.452390 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.460947 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mdmcl"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.460958 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.465253 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.473575 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a8a8c712-66f2-48ad-857f-8ed05ef28076" (UID: "a8a8c712-66f2-48ad-857f-8ed05ef28076"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.479930 4884 scope.go:117] "RemoveContainer" containerID="53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.485401 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbd36c7b-a8d4-4812-a133-4a82a466dbc7-catalog-content\") pod \"community-operators-24s97\" (UID: \"fbd36c7b-a8d4-4812-a133-4a82a466dbc7\") " pod="openshift-marketplace/community-operators-24s97"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.485511 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbd36c7b-a8d4-4812-a133-4a82a466dbc7-utilities\") pod \"community-operators-24s97\" (UID: \"fbd36c7b-a8d4-4812-a133-4a82a466dbc7\") " pod="openshift-marketplace/community-operators-24s97"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.485594 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpdln\" (UniqueName: \"kubernetes.io/projected/fbd36c7b-a8d4-4812-a133-4a82a466dbc7-kube-api-access-mpdln\") pod \"community-operators-24s97\" (UID: \"fbd36c7b-a8d4-4812-a133-4a82a466dbc7\") " pod="openshift-marketplace/community-operators-24s97"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.485645 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-scripts\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.485661 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvk6w\" (UniqueName: \"kubernetes.io/projected/a8a8c712-66f2-48ad-857f-8ed05ef28076-kube-api-access-lvk6w\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.485671 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8a8c712-66f2-48ad-857f-8ed05ef28076-log-httpd\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.485680 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.485688 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a8a8c712-66f2-48ad-857f-8ed05ef28076-run-httpd\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.486228 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbd36c7b-a8d4-4812-a133-4a82a466dbc7-catalog-content\") pod \"community-operators-24s97\" (UID: \"fbd36c7b-a8d4-4812-a133-4a82a466dbc7\") " pod="openshift-marketplace/community-operators-24s97"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.487225 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbd36c7b-a8d4-4812-a133-4a82a466dbc7-utilities\") pod \"community-operators-24s97\" (UID: \"fbd36c7b-a8d4-4812-a133-4a82a466dbc7\") " pod="openshift-marketplace/community-operators-24s97"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.500468 4884 scope.go:117] "RemoveContainer" containerID="f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b"
Dec 10 00:57:15 crc kubenswrapper[4884]: E1210 00:57:15.500771 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b\": container with ID starting with f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b not found: ID does not exist" containerID="f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.500795 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b"} err="failed to get container status \"f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b\": rpc error: code = NotFound desc = could not find container \"f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b\": container with ID starting with f8f294ddb6a85e2812a5071f58a0a8ef9934d09128435141a5891505baffbf5b not found: ID does not exist"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.500815 4884 scope.go:117] "RemoveContainer" containerID="c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d"
Dec 10 00:57:15 crc kubenswrapper[4884]: E1210 00:57:15.500989 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d\": container with ID starting with c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d not found: ID does not exist" containerID="c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.501008 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d"} err="failed to get container status \"c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d\": rpc error: code = NotFound desc = could not find container \"c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d\": container with ID starting with c28c82cd904929502609867ad542883924e9e4e0fa3d361edc70f2488f55d47d not found: ID does not exist"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.501020 4884 scope.go:117] "RemoveContainer" containerID="be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38"
Dec 10 00:57:15 crc kubenswrapper[4884]: E1210 00:57:15.501239 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38\": container with ID starting with be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38 not found: ID does not exist" containerID="be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.501254 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38"} err="failed to get container status \"be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38\": rpc error: code = NotFound desc = could not find container \"be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38\": container with ID starting with be26a5b6135cdeffe1fedf8df8df81d5a54cc12ee10d3dd21aa2b79165989c38 not found: ID does not exist"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.501265 4884 scope.go:117] "RemoveContainer" containerID="53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033"
Dec 10 00:57:15 crc kubenswrapper[4884]: E1210 00:57:15.501661 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033\": container with ID starting with 53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033 not found: ID does not exist" containerID="53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.501679 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033"} err="failed to get container status \"53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033\": rpc error: code = NotFound desc = could not find container \"53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033\": container with ID starting with 53516efb3d8c7976fd193da520658b0227aa8a58a84c376cb33a53db15d46033 not found: ID does not exist"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.503165 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpdln\" (UniqueName: \"kubernetes.io/projected/fbd36c7b-a8d4-4812-a133-4a82a466dbc7-kube-api-access-mpdln\") pod \"community-operators-24s97\" (UID: \"fbd36c7b-a8d4-4812-a133-4a82a466dbc7\") " pod="openshift-marketplace/community-operators-24s97"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.511949 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8a8c712-66f2-48ad-857f-8ed05ef28076" (UID: "a8a8c712-66f2-48ad-857f-8ed05ef28076"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.541982 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-config-data" (OuterVolumeSpecName: "config-data") pod "a8a8c712-66f2-48ad-857f-8ed05ef28076" (UID: "a8a8c712-66f2-48ad-857f-8ed05ef28076"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.587766 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63998bed-5250-4dc9-a161-3d9ceaf54e5c-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"63998bed-5250-4dc9-a161-3d9ceaf54e5c\") " pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.587842 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63998bed-5250-4dc9-a161-3d9ceaf54e5c-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"63998bed-5250-4dc9-a161-3d9ceaf54e5c\") " pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.587933 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tx5kp\" (UniqueName: \"kubernetes.io/projected/63998bed-5250-4dc9-a161-3d9ceaf54e5c-kube-api-access-tx5kp\") pod \"nova-cell0-conductor-0\" (UID: \"63998bed-5250-4dc9-a161-3d9ceaf54e5c\") " pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.588308 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-config-data\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.588351 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8a8c712-66f2-48ad-857f-8ed05ef28076-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.628348 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-24s97"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.692510 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tx5kp\" (UniqueName: \"kubernetes.io/projected/63998bed-5250-4dc9-a161-3d9ceaf54e5c-kube-api-access-tx5kp\") pod \"nova-cell0-conductor-0\" (UID: \"63998bed-5250-4dc9-a161-3d9ceaf54e5c\") " pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.692632 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63998bed-5250-4dc9-a161-3d9ceaf54e5c-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"63998bed-5250-4dc9-a161-3d9ceaf54e5c\") " pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.692662 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63998bed-5250-4dc9-a161-3d9ceaf54e5c-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"63998bed-5250-4dc9-a161-3d9ceaf54e5c\") " pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.698164 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63998bed-5250-4dc9-a161-3d9ceaf54e5c-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"63998bed-5250-4dc9-a161-3d9ceaf54e5c\") " pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.698477 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63998bed-5250-4dc9-a161-3d9ceaf54e5c-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"63998bed-5250-4dc9-a161-3d9ceaf54e5c\") " pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.720486 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.735084 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tx5kp\" (UniqueName: \"kubernetes.io/projected/63998bed-5250-4dc9-a161-3d9ceaf54e5c-kube-api-access-tx5kp\") pod \"nova-cell0-conductor-0\" (UID: \"63998bed-5250-4dc9-a161-3d9ceaf54e5c\") " pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.748532 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.775972 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.778580 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.779386 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.785718 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.785764 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.791698 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.896609 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b90dc1d7-e444-4c26-9425-748256d855c4-run-httpd\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.896657 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfxz9\" (UniqueName: \"kubernetes.io/projected/b90dc1d7-e444-4c26-9425-748256d855c4-kube-api-access-kfxz9\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.896714 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b90dc1d7-e444-4c26-9425-748256d855c4-log-httpd\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.896732 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-scripts\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.896787 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.896810 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-config-data\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.896846 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.998359 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b90dc1d7-e444-4c26-9425-748256d855c4-run-httpd\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.999126 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfxz9\" (UniqueName: \"kubernetes.io/projected/b90dc1d7-e444-4c26-9425-748256d855c4-kube-api-access-kfxz9\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.999194 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b90dc1d7-e444-4c26-9425-748256d855c4-log-httpd\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.999212 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-scripts\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.999273 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.999295 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-config-data\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:15 crc kubenswrapper[4884]: I1210 00:57:15.999335 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.000124 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b90dc1d7-e444-4c26-9425-748256d855c4-log-httpd\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:15.999071 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b90dc1d7-e444-4c26-9425-748256d855c4-run-httpd\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.004126 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.004172 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.005476 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-config-data\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.006124 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-scripts\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.016205 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfxz9\" (UniqueName: \"kubernetes.io/projected/b90dc1d7-e444-4c26-9425-748256d855c4-kube-api-access-kfxz9\") pod \"ceilometer-0\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " pod="openstack/ceilometer-0"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.101181 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.236969 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-24s97"]
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.367744 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24s97" event={"ID":"fbd36c7b-a8d4-4812-a133-4a82a466dbc7","Type":"ContainerStarted","Data":"d3a210d87aecff60ab1c16449dc235c520613f2d2db7b2171c2eef19402d107b"}
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.370054 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.571687 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:57:16 crc kubenswrapper[4884]: W1210 00:57:16.571801 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb90dc1d7_e444_4c26_9425_748256d855c4.slice/crio-0f89e1a38a70c6122438ee6181ebb8ebc300f1d967c695a70468375a1ab3fc6d WatchSource:0}: Error finding container 0f89e1a38a70c6122438ee6181ebb8ebc300f1d967c695a70468375a1ab3fc6d: Status 404 returned error can't find the container with id 0f89e1a38a70c6122438ee6181ebb8ebc300f1d967c695a70468375a1ab3fc6d
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.827210 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-96hsw"]
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.829157 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-96hsw"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.847995 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-96hsw"]
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.871966 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-07b9-account-create-update-24rtb"]
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.873343 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-07b9-account-create-update-24rtb"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.875145 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.899909 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-07b9-account-create-update-24rtb"]
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.916265 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb77cff2-06ad-4b10-a4c4-89db9f13c65b-operator-scripts\") pod \"aodh-07b9-account-create-update-24rtb\" (UID: \"bb77cff2-06ad-4b10-a4c4-89db9f13c65b\") " pod="openstack/aodh-07b9-account-create-update-24rtb"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.916298 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4kvg\" (UniqueName: \"kubernetes.io/projected/c50eb7e4-a13a-4700-a41e-5246ff85985b-kube-api-access-g4kvg\") pod \"aodh-db-create-96hsw\" (UID: \"c50eb7e4-a13a-4700-a41e-5246ff85985b\") " pod="openstack/aodh-db-create-96hsw"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.916427 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c50eb7e4-a13a-4700-a41e-5246ff85985b-operator-scripts\") pod \"aodh-db-create-96hsw\" (UID: \"c50eb7e4-a13a-4700-a41e-5246ff85985b\") " pod="openstack/aodh-db-create-96hsw"
Dec 10 00:57:16 crc kubenswrapper[4884]: I1210 00:57:16.916466 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzpzp\" (UniqueName: \"kubernetes.io/projected/bb77cff2-06ad-4b10-a4c4-89db9f13c65b-kube-api-access-qzpzp\") pod \"aodh-07b9-account-create-update-24rtb\" (UID: \"bb77cff2-06ad-4b10-a4c4-89db9f13c65b\") " pod="openstack/aodh-07b9-account-create-update-24rtb"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.017917 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c50eb7e4-a13a-4700-a41e-5246ff85985b-operator-scripts\") pod \"aodh-db-create-96hsw\" (UID: \"c50eb7e4-a13a-4700-a41e-5246ff85985b\") " pod="openstack/aodh-db-create-96hsw"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.017967 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzpzp\" (UniqueName: \"kubernetes.io/projected/bb77cff2-06ad-4b10-a4c4-89db9f13c65b-kube-api-access-qzpzp\") pod \"aodh-07b9-account-create-update-24rtb\" (UID: \"bb77cff2-06ad-4b10-a4c4-89db9f13c65b\") " pod="openstack/aodh-07b9-account-create-update-24rtb"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.018039 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb77cff2-06ad-4b10-a4c4-89db9f13c65b-operator-scripts\") pod \"aodh-07b9-account-create-update-24rtb\" (UID: \"bb77cff2-06ad-4b10-a4c4-89db9f13c65b\") " pod="openstack/aodh-07b9-account-create-update-24rtb"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.018073 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4kvg\" (UniqueName: \"kubernetes.io/projected/c50eb7e4-a13a-4700-a41e-5246ff85985b-kube-api-access-g4kvg\") pod \"aodh-db-create-96hsw\" (UID: \"c50eb7e4-a13a-4700-a41e-5246ff85985b\") " pod="openstack/aodh-db-create-96hsw"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.019251 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c50eb7e4-a13a-4700-a41e-5246ff85985b-operator-scripts\") pod \"aodh-db-create-96hsw\" (UID: \"c50eb7e4-a13a-4700-a41e-5246ff85985b\") " pod="openstack/aodh-db-create-96hsw"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.020476 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb77cff2-06ad-4b10-a4c4-89db9f13c65b-operator-scripts\") pod \"aodh-07b9-account-create-update-24rtb\" (UID: \"bb77cff2-06ad-4b10-a4c4-89db9f13c65b\") " pod="openstack/aodh-07b9-account-create-update-24rtb"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.039390 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzpzp\" (UniqueName: \"kubernetes.io/projected/bb77cff2-06ad-4b10-a4c4-89db9f13c65b-kube-api-access-qzpzp\") pod \"aodh-07b9-account-create-update-24rtb\" (UID: \"bb77cff2-06ad-4b10-a4c4-89db9f13c65b\") " pod="openstack/aodh-07b9-account-create-update-24rtb"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.053903 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4kvg\" (UniqueName: \"kubernetes.io/projected/c50eb7e4-a13a-4700-a41e-5246ff85985b-kube-api-access-g4kvg\") pod \"aodh-db-create-96hsw\" (UID: \"c50eb7e4-a13a-4700-a41e-5246ff85985b\") " pod="openstack/aodh-db-create-96hsw"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.213815 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-96hsw"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.224201 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-07b9-account-create-update-24rtb"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.353214 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23ada451-7f1d-4445-b4e9-74fed45cdbcd" path="/var/lib/kubelet/pods/23ada451-7f1d-4445-b4e9-74fed45cdbcd/volumes"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.363110 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8a8c712-66f2-48ad-857f-8ed05ef28076" path="/var/lib/kubelet/pods/a8a8c712-66f2-48ad-857f-8ed05ef28076/volumes"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.393919 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"63998bed-5250-4dc9-a161-3d9ceaf54e5c","Type":"ContainerStarted","Data":"ebfd20126de574002cd7d9e5076a975d5110caaca4de2e28a9bee8eb8196c5fb"}
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.393956 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"63998bed-5250-4dc9-a161-3d9ceaf54e5c","Type":"ContainerStarted","Data":"960bc231125047255f4869a050c584498a4f32dddf93a1e6aec46ae717990ef7"}
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.394396 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.401797 4884 generic.go:334] "Generic (PLEG): container finished" podID="fbd36c7b-a8d4-4812-a133-4a82a466dbc7" containerID="f7e6a8d8153ffe907a4f4ad0ca338b64d65f2df1a700366e0466756b53103608" exitCode=0
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.401872 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24s97" event={"ID":"fbd36c7b-a8d4-4812-a133-4a82a466dbc7","Type":"ContainerDied","Data":"f7e6a8d8153ffe907a4f4ad0ca338b64d65f2df1a700366e0466756b53103608"}
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.417571 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b90dc1d7-e444-4c26-9425-748256d855c4","Type":"ContainerStarted","Data":"0f89e1a38a70c6122438ee6181ebb8ebc300f1d967c695a70468375a1ab3fc6d"}
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.440931 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.440911726 podStartE2EDuration="2.440911726s" podCreationTimestamp="2025-12-10 00:57:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:57:17.435107409 +0000 UTC m=+1610.513064536" watchObservedRunningTime="2025-12-10 00:57:17.440911726 +0000 UTC m=+1610.518868843"
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.834041 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-96hsw"]
Dec 10 00:57:17 crc kubenswrapper[4884]: I1210 00:57:17.935139 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-07b9-account-create-update-24rtb"]
Dec 10 00:57:17 crc kubenswrapper[4884]: W1210 00:57:17.940978 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb77cff2_06ad_4b10_a4c4_89db9f13c65b.slice/crio-8c972acfc8217b0e3f0264c7b06f9e911cd41a472ea818184ab185e248fd556a WatchSource:0}: Error finding container 8c972acfc8217b0e3f0264c7b06f9e911cd41a472ea818184ab185e248fd556a: Status 404 returned error can't find the container with id 8c972acfc8217b0e3f0264c7b06f9e911cd41a472ea818184ab185e248fd556a
Dec 10 00:57:18 crc kubenswrapper[4884]: I1210 00:57:18.427737 4884 generic.go:334] "Generic (PLEG): container finished" podID="c50eb7e4-a13a-4700-a41e-5246ff85985b" containerID="08cba2307bd9555ee7ef8245ffab41fc9f09781cc44049eb5b5fa323a72bc404" exitCode=0
Dec 10 00:57:18 crc kubenswrapper[4884]: I1210 00:57:18.427826 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-96hsw" event={"ID":"c50eb7e4-a13a-4700-a41e-5246ff85985b","Type":"ContainerDied","Data":"08cba2307bd9555ee7ef8245ffab41fc9f09781cc44049eb5b5fa323a72bc404"}
Dec 10 00:57:18 crc kubenswrapper[4884]: I1210 00:57:18.427877 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-96hsw" event={"ID":"c50eb7e4-a13a-4700-a41e-5246ff85985b","Type":"ContainerStarted","Data":"2eb946cfc8e85c45350bfd0d3276420fb1e4fceec11f258af41d379d15276755"}
Dec 10 00:57:18 crc kubenswrapper[4884]: I1210 00:57:18.437883 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b90dc1d7-e444-4c26-9425-748256d855c4","Type":"ContainerStarted","Data":"6b21b688c4c983b3192109e446730f5ee48645b4c47b3ff0ca5e743491ca5fe0"}
Dec 10 00:57:18 crc kubenswrapper[4884]: I1210 00:57:18.437917 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b90dc1d7-e444-4c26-9425-748256d855c4","Type":"ContainerStarted","Data":"27e1e02783ef18f16d63f6204f9a26347e3568d95e8fb8df5be64d6db7defae0"}
Dec 10 00:57:18 crc kubenswrapper[4884]: I1210 00:57:18.447455 4884 generic.go:334] "Generic (PLEG): container finished" podID="bb77cff2-06ad-4b10-a4c4-89db9f13c65b" containerID="281e0c38bca80be7f295a366197a64655016eee0b510252e9df825077f275d05" exitCode=0
Dec 10 00:57:18 crc kubenswrapper[4884]: I1210 00:57:18.447562 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-07b9-account-create-update-24rtb" event={"ID":"bb77cff2-06ad-4b10-a4c4-89db9f13c65b","Type":"ContainerDied","Data":"281e0c38bca80be7f295a366197a64655016eee0b510252e9df825077f275d05"}
Dec 10 00:57:18 crc kubenswrapper[4884]: I1210 00:57:18.447641 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-07b9-account-create-update-24rtb" event={"ID":"bb77cff2-06ad-4b10-a4c4-89db9f13c65b","Type":"ContainerStarted","Data":"8c972acfc8217b0e3f0264c7b06f9e911cd41a472ea818184ab185e248fd556a"}
Dec 10 00:57:19 crc kubenswrapper[4884]: I1210 00:57:19.461584 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b90dc1d7-e444-4c26-9425-748256d855c4","Type":"ContainerStarted","Data":"e33df5de5d62567a511a10f871fe9f7fa21a223a1d8b946246512431a453352b"}
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.123507 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-96hsw"
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.134310 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-07b9-account-create-update-24rtb"
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.202418 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4kvg\" (UniqueName: \"kubernetes.io/projected/c50eb7e4-a13a-4700-a41e-5246ff85985b-kube-api-access-g4kvg\") pod \"c50eb7e4-a13a-4700-a41e-5246ff85985b\" (UID: \"c50eb7e4-a13a-4700-a41e-5246ff85985b\") "
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.202532 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzpzp\" (UniqueName: \"kubernetes.io/projected/bb77cff2-06ad-4b10-a4c4-89db9f13c65b-kube-api-access-qzpzp\") pod \"bb77cff2-06ad-4b10-a4c4-89db9f13c65b\" (UID: \"bb77cff2-06ad-4b10-a4c4-89db9f13c65b\") "
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.202653 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c50eb7e4-a13a-4700-a41e-5246ff85985b-operator-scripts\") pod \"c50eb7e4-a13a-4700-a41e-5246ff85985b\" (UID: \"c50eb7e4-a13a-4700-a41e-5246ff85985b\") "
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.202685 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb77cff2-06ad-4b10-a4c4-89db9f13c65b-operator-scripts\") pod \"bb77cff2-06ad-4b10-a4c4-89db9f13c65b\" (UID: \"bb77cff2-06ad-4b10-a4c4-89db9f13c65b\") "
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.203136 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c50eb7e4-a13a-4700-a41e-5246ff85985b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c50eb7e4-a13a-4700-a41e-5246ff85985b" (UID: "c50eb7e4-a13a-4700-a41e-5246ff85985b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.203661 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb77cff2-06ad-4b10-a4c4-89db9f13c65b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bb77cff2-06ad-4b10-a4c4-89db9f13c65b" (UID: "bb77cff2-06ad-4b10-a4c4-89db9f13c65b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.208260 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c50eb7e4-a13a-4700-a41e-5246ff85985b-kube-api-access-g4kvg" (OuterVolumeSpecName: "kube-api-access-g4kvg") pod "c50eb7e4-a13a-4700-a41e-5246ff85985b" (UID: "c50eb7e4-a13a-4700-a41e-5246ff85985b"). InnerVolumeSpecName "kube-api-access-g4kvg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.214622 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb77cff2-06ad-4b10-a4c4-89db9f13c65b-kube-api-access-qzpzp" (OuterVolumeSpecName: "kube-api-access-qzpzp") pod "bb77cff2-06ad-4b10-a4c4-89db9f13c65b" (UID: "bb77cff2-06ad-4b10-a4c4-89db9f13c65b"). InnerVolumeSpecName "kube-api-access-qzpzp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.305692 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzpzp\" (UniqueName: \"kubernetes.io/projected/bb77cff2-06ad-4b10-a4c4-89db9f13c65b-kube-api-access-qzpzp\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.305732 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c50eb7e4-a13a-4700-a41e-5246ff85985b-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.305745 4884 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb77cff2-06ad-4b10-a4c4-89db9f13c65b-operator-scripts\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.305756 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4kvg\" (UniqueName: \"kubernetes.io/projected/c50eb7e4-a13a-4700-a41e-5246ff85985b-kube-api-access-g4kvg\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.477081 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-96hsw" event={"ID":"c50eb7e4-a13a-4700-a41e-5246ff85985b","Type":"ContainerDied","Data":"2eb946cfc8e85c45350bfd0d3276420fb1e4fceec11f258af41d379d15276755"}
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.477119 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-96hsw"
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.477543 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2eb946cfc8e85c45350bfd0d3276420fb1e4fceec11f258af41d379d15276755"
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.480121 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b90dc1d7-e444-4c26-9425-748256d855c4","Type":"ContainerStarted","Data":"20d66aebf317cc913631679235b1b95a8ad324611cf6925544fec246ed67776b"}
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.480272 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.482219 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-07b9-account-create-update-24rtb" event={"ID":"bb77cff2-06ad-4b10-a4c4-89db9f13c65b","Type":"ContainerDied","Data":"8c972acfc8217b0e3f0264c7b06f9e911cd41a472ea818184ab185e248fd556a"}
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.482249 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c972acfc8217b0e3f0264c7b06f9e911cd41a472ea818184ab185e248fd556a"
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.482369 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-07b9-account-create-update-24rtb"
Dec 10 00:57:20 crc kubenswrapper[4884]: I1210 00:57:20.517210 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.126264705 podStartE2EDuration="5.517190504s" podCreationTimestamp="2025-12-10 00:57:15 +0000 UTC" firstStartedPulling="2025-12-10 00:57:16.575396637 +0000 UTC m=+1609.653353754" lastFinishedPulling="2025-12-10 00:57:19.966322436 +0000 UTC m=+1613.044279553" observedRunningTime="2025-12-10 00:57:20.515112028 +0000 UTC m=+1613.593069165" watchObservedRunningTime="2025-12-10 00:57:20.517190504 +0000 UTC m=+1613.595147631"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.357951 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-5nqf7"]
Dec 10 00:57:22 crc kubenswrapper[4884]: E1210 00:57:22.358633 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c50eb7e4-a13a-4700-a41e-5246ff85985b" containerName="mariadb-database-create"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.358645 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c50eb7e4-a13a-4700-a41e-5246ff85985b" containerName="mariadb-database-create"
Dec 10 00:57:22 crc kubenswrapper[4884]: E1210 00:57:22.358669 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb77cff2-06ad-4b10-a4c4-89db9f13c65b" containerName="mariadb-account-create-update"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.358675 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb77cff2-06ad-4b10-a4c4-89db9f13c65b" containerName="mariadb-account-create-update"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.358854 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb77cff2-06ad-4b10-a4c4-89db9f13c65b" containerName="mariadb-account-create-update"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.358882 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c50eb7e4-a13a-4700-a41e-5246ff85985b" containerName="mariadb-database-create"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.359608 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.365509 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-pd287"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.365674 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.365916 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.366509 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.372509 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-5nqf7"]
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.461875 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnqst\" (UniqueName: \"kubernetes.io/projected/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-kube-api-access-gnqst\") pod \"aodh-db-sync-5nqf7\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.461932 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-combined-ca-bundle\") pod \"aodh-db-sync-5nqf7\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.461976 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-scripts\") pod \"aodh-db-sync-5nqf7\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.462021 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-config-data\") pod \"aodh-db-sync-5nqf7\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.564005 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnqst\" (UniqueName: \"kubernetes.io/projected/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-kube-api-access-gnqst\") pod \"aodh-db-sync-5nqf7\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.564066 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-combined-ca-bundle\") pod \"aodh-db-sync-5nqf7\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.564111 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-scripts\") pod \"aodh-db-sync-5nqf7\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.564156 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-config-data\") pod \"aodh-db-sync-5nqf7\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.572964 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-combined-ca-bundle\") pod \"aodh-db-sync-5nqf7\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.574630 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-config-data\") pod \"aodh-db-sync-5nqf7\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.580738 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-scripts\") pod \"aodh-db-sync-5nqf7\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.589852 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnqst\" (UniqueName: \"kubernetes.io/projected/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-kube-api-access-gnqst\") pod \"aodh-db-sync-5nqf7\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:22 crc kubenswrapper[4884]: I1210 00:57:22.687055 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:24 crc kubenswrapper[4884]: I1210 00:57:24.828762 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-5nqf7"]
Dec 10 00:57:25 crc kubenswrapper[4884]: I1210 00:57:25.550822 4884 generic.go:334] "Generic (PLEG): container finished" podID="fbd36c7b-a8d4-4812-a133-4a82a466dbc7" containerID="a93b5c3cfe8298d95b43f20d7e65264b9cb8da8227245eee0660e4c7f386e7fc" exitCode=0
Dec 10 00:57:25 crc kubenswrapper[4884]: I1210 00:57:25.550899 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24s97" event={"ID":"fbd36c7b-a8d4-4812-a133-4a82a466dbc7","Type":"ContainerDied","Data":"a93b5c3cfe8298d95b43f20d7e65264b9cb8da8227245eee0660e4c7f386e7fc"}
Dec 10 00:57:25 crc kubenswrapper[4884]: I1210 00:57:25.554852 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-5nqf7" event={"ID":"2068f8d7-d11e-4ae1-845b-6a542dfe62fe","Type":"ContainerStarted","Data":"a6bd76a1ea7da9008ebab0c4d8b84cd5eede51e8e73cec4049d517e9781c6b8d"}
Dec 10 00:57:25 crc kubenswrapper[4884]: I1210 00:57:25.826011 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.287062 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957"
Dec 10 00:57:26 crc kubenswrapper[4884]: E1210 00:57:26.288159 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.350809 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-2m7nt"]
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.353728 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.356076 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.356304 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.373589 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-2m7nt"]
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.451966 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2m7nt\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.452017 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-scripts\") pod \"nova-cell0-cell-mapping-2m7nt\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.452145 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-config-data\") pod \"nova-cell0-cell-mapping-2m7nt\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.452164 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xs7tx\" (UniqueName: \"kubernetes.io/projected/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-kube-api-access-xs7tx\") pod \"nova-cell0-cell-mapping-2m7nt\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.549041 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.550758 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.557237 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-config-data\") pod \"nova-cell0-cell-mapping-2m7nt\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.557285 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xs7tx\" (UniqueName: \"kubernetes.io/projected/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-kube-api-access-xs7tx\") pod \"nova-cell0-cell-mapping-2m7nt\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.557428 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2m7nt\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.557493 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-scripts\") pod \"nova-cell0-cell-mapping-2m7nt\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.563578 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.571327 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-2m7nt\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.575119 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-config-data\") pod \"nova-cell0-cell-mapping-2m7nt\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.576649 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.595530 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-scripts\") pod \"nova-cell0-cell-mapping-2m7nt\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.601336 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xs7tx\" (UniqueName: \"kubernetes.io/projected/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-kube-api-access-xs7tx\") pod \"nova-cell0-cell-mapping-2m7nt\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.618844 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-24s97" event={"ID":"fbd36c7b-a8d4-4812-a133-4a82a466dbc7","Type":"ContainerStarted","Data":"8051daa255cd2873ae19cdc7447ecf316cec54fe8791f62f1592c77a4e3f552d"}
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.664471 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b3a947-00f5-41a8-98f7-e1c8571f8309-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.664556 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f7b3a947-00f5-41a8-98f7-e1c8571f8309-logs\") pod \"nova-api-0\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.664632 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7ncl\" (UniqueName: \"kubernetes.io/projected/f7b3a947-00f5-41a8-98f7-e1c8571f8309-kube-api-access-p7ncl\") pod \"nova-api-0\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.664681 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b3a947-00f5-41a8-98f7-e1c8571f8309-config-data\") pod \"nova-api-0\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.670750 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.708422 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.709778 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.742669 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2m7nt"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.766851 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b3a947-00f5-41a8-98f7-e1c8571f8309-config-data\") pod \"nova-api-0\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.766971 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b3a947-00f5-41a8-98f7-e1c8571f8309-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.767031 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f7b3a947-00f5-41a8-98f7-e1c8571f8309-logs\") pod \"nova-api-0\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.767140 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7ncl\" (UniqueName: \"kubernetes.io/projected/f7b3a947-00f5-41a8-98f7-e1c8571f8309-kube-api-access-p7ncl\") pod \"nova-api-0\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.772729 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f7b3a947-00f5-41a8-98f7-e1c8571f8309-logs\") pod \"nova-api-0\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.773241 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.785733 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.787139 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b3a947-00f5-41a8-98f7-e1c8571f8309-config-data\") pod \"nova-api-0\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.796700 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.800533 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b3a947-00f5-41a8-98f7-e1c8571f8309-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.811541 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7ncl\" (UniqueName: \"kubernetes.io/projected/f7b3a947-00f5-41a8-98f7-e1c8571f8309-kube-api-access-p7ncl\") pod \"nova-api-0\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " pod="openstack/nova-api-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.837147 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.877503 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.880553 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77b234d2-981d-4198-a62b-3d0b68bde48a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"77b234d2-981d-4198-a62b-3d0b68bde48a\") " pod="openstack/nova-scheduler-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.880691 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmxdm\" (UniqueName: \"kubernetes.io/projected/77b234d2-981d-4198-a62b-3d0b68bde48a-kube-api-access-zmxdm\") pod \"nova-scheduler-0\" (UID: \"77b234d2-981d-4198-a62b-3d0b68bde48a\") " pod="openstack/nova-scheduler-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.880808 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " pod="openstack/nova-metadata-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.880906 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q4qn\" (UniqueName: \"kubernetes.io/projected/7dd09275-74e3-44a8-841c-25b0068a1457-kube-api-access-4q4qn\") pod \"nova-metadata-0\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " pod="openstack/nova-metadata-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.880926 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77b234d2-981d-4198-a62b-3d0b68bde48a-config-data\") pod \"nova-scheduler-0\" (UID: \"77b234d2-981d-4198-a62b-3d0b68bde48a\") " pod="openstack/nova-scheduler-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.880961 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-config-data\") pod \"nova-metadata-0\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " pod="openstack/nova-metadata-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.881022 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7dd09275-74e3-44a8-841c-25b0068a1457-logs\") pod \"nova-metadata-0\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " pod="openstack/nova-metadata-0"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.883408 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-24s97" podStartSLOduration=3.261646422 podStartE2EDuration="11.883391336s" podCreationTimestamp="2025-12-10 00:57:15 +0000 UTC" firstStartedPulling="2025-12-10 00:57:17.407396518 +0000 UTC m=+1610.485353635" lastFinishedPulling="2025-12-10 00:57:26.029141432 +0000 UTC m=+1619.107098549" observedRunningTime="2025-12-10 00:57:26.699820114 +0000 UTC m=+1619.777777231" watchObservedRunningTime="2025-12-10 00:57:26.883391336 +0000 UTC m=+1619.961348453"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.923924 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-fp9tb"]
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.926003 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb"
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.936591 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-fp9tb"]
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.965086 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.966591 4884 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.980678 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.983750 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/361cfe8a-f31e-49de-805e-f2d0b259d533-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"361cfe8a-f31e-49de-805e-f2d0b259d533\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.983799 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmxdm\" (UniqueName: \"kubernetes.io/projected/77b234d2-981d-4198-a62b-3d0b68bde48a-kube-api-access-zmxdm\") pod \"nova-scheduler-0\" (UID: \"77b234d2-981d-4198-a62b-3d0b68bde48a\") " pod="openstack/nova-scheduler-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.983834 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-dns-svc\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.983883 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-dns-swift-storage-0\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.983901 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-ovsdbserver-sb\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.983920 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " pod="openstack/nova-metadata-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.983968 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-ovsdbserver-nb\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.983987 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q4qn\" (UniqueName: \"kubernetes.io/projected/7dd09275-74e3-44a8-841c-25b0068a1457-kube-api-access-4q4qn\") pod \"nova-metadata-0\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " pod="openstack/nova-metadata-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.984003 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/77b234d2-981d-4198-a62b-3d0b68bde48a-config-data\") pod \"nova-scheduler-0\" (UID: \"77b234d2-981d-4198-a62b-3d0b68bde48a\") " pod="openstack/nova-scheduler-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.984044 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-config-data\") pod \"nova-metadata-0\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " pod="openstack/nova-metadata-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.984076 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qpdm\" (UniqueName: \"kubernetes.io/projected/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-kube-api-access-4qpdm\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.984110 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7dd09275-74e3-44a8-841c-25b0068a1457-logs\") pod \"nova-metadata-0\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " pod="openstack/nova-metadata-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.984134 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvfbz\" (UniqueName: \"kubernetes.io/projected/361cfe8a-f31e-49de-805e-f2d0b259d533-kube-api-access-hvfbz\") pod \"nova-cell1-novncproxy-0\" (UID: \"361cfe8a-f31e-49de-805e-f2d0b259d533\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.984165 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-config\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.984183 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/361cfe8a-f31e-49de-805e-f2d0b259d533-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"361cfe8a-f31e-49de-805e-f2d0b259d533\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.984204 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77b234d2-981d-4198-a62b-3d0b68bde48a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"77b234d2-981d-4198-a62b-3d0b68bde48a\") " pod="openstack/nova-scheduler-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.991851 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7dd09275-74e3-44a8-841c-25b0068a1457-logs\") pod \"nova-metadata-0\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " pod="openstack/nova-metadata-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.996624 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77b234d2-981d-4198-a62b-3d0b68bde48a-config-data\") pod \"nova-scheduler-0\" (UID: \"77b234d2-981d-4198-a62b-3d0b68bde48a\") " 
pod="openstack/nova-scheduler-0" Dec 10 00:57:26 crc kubenswrapper[4884]: I1210 00:57:26.999567 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77b234d2-981d-4198-a62b-3d0b68bde48a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"77b234d2-981d-4198-a62b-3d0b68bde48a\") " pod="openstack/nova-scheduler-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.003785 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.009620 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmxdm\" (UniqueName: \"kubernetes.io/projected/77b234d2-981d-4198-a62b-3d0b68bde48a-kube-api-access-zmxdm\") pod \"nova-scheduler-0\" (UID: \"77b234d2-981d-4198-a62b-3d0b68bde48a\") " pod="openstack/nova-scheduler-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.009961 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q4qn\" (UniqueName: \"kubernetes.io/projected/7dd09275-74e3-44a8-841c-25b0068a1457-kube-api-access-4q4qn\") pod \"nova-metadata-0\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " pod="openstack/nova-metadata-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.014388 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " pod="openstack/nova-metadata-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.016100 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-config-data\") pod \"nova-metadata-0\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " pod="openstack/nova-metadata-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.058634 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.085669 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-config\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.085718 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/361cfe8a-f31e-49de-805e-f2d0b259d533-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"361cfe8a-f31e-49de-805e-f2d0b259d533\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.085776 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/361cfe8a-f31e-49de-805e-f2d0b259d533-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"361cfe8a-f31e-49de-805e-f2d0b259d533\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.085815 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-dns-svc\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.085857 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-dns-swift-storage-0\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.085875 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-ovsdbserver-sb\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.085923 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-ovsdbserver-nb\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.085952 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qpdm\" (UniqueName: \"kubernetes.io/projected/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-kube-api-access-4qpdm\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.085988 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvfbz\" (UniqueName: \"kubernetes.io/projected/361cfe8a-f31e-49de-805e-f2d0b259d533-kube-api-access-hvfbz\") pod \"nova-cell1-novncproxy-0\" (UID: \"361cfe8a-f31e-49de-805e-f2d0b259d533\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:57:27 crc 
kubenswrapper[4884]: I1210 00:57:27.094352 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-dns-svc\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.094916 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-config\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.109614 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-dns-swift-storage-0\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.110393 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-ovsdbserver-sb\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.111281 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/361cfe8a-f31e-49de-805e-f2d0b259d533-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"361cfe8a-f31e-49de-805e-f2d0b259d533\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.111585 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-ovsdbserver-nb\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.128777 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/361cfe8a-f31e-49de-805e-f2d0b259d533-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"361cfe8a-f31e-49de-805e-f2d0b259d533\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.129112 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qpdm\" (UniqueName: \"kubernetes.io/projected/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-kube-api-access-4qpdm\") pod \"dnsmasq-dns-568d7fd7cf-fp9tb\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.133255 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvfbz\" (UniqueName: \"kubernetes.io/projected/361cfe8a-f31e-49de-805e-f2d0b259d533-kube-api-access-hvfbz\") pod \"nova-cell1-novncproxy-0\" (UID: \"361cfe8a-f31e-49de-805e-f2d0b259d533\") " pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.220920 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.256895 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.280795 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.308295 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:57:27 crc kubenswrapper[4884]: W1210 00:57:27.466287 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac5ac12f_a8ae_4710_a058_2eed2b61a0e3.slice/crio-b2bb41b7be07e93db6085d34fe9846322221af042b90e4a4a4e029a5d0ee98ad WatchSource:0}: Error finding container b2bb41b7be07e93db6085d34fe9846322221af042b90e4a4a4e029a5d0ee98ad: Status 404 returned error can't find the container with id b2bb41b7be07e93db6085d34fe9846322221af042b90e4a4a4e029a5d0ee98ad Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.490792 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-2m7nt"] Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.678603 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 00:57:27 crc kubenswrapper[4884]: I1210 00:57:27.685986 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2m7nt" event={"ID":"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3","Type":"ContainerStarted","Data":"b2bb41b7be07e93db6085d34fe9846322221af042b90e4a4a4e029a5d0ee98ad"} Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.206340 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.280367 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-fp9tb"] Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.323499 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-mrh8v"] Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.324972 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.328631 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.328706 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.339819 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-mrh8v"] Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.361162 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-config-data\") pod \"nova-cell1-conductor-db-sync-mrh8v\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") " pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.361412 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-scripts\") pod \"nova-cell1-conductor-db-sync-mrh8v\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") " pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.361600 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wskbn\" (UniqueName: \"kubernetes.io/projected/595f30cf-13ba-48bb-949d-393f11660091-kube-api-access-wskbn\") pod \"nova-cell1-conductor-db-sync-mrh8v\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") " pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.361733 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-mrh8v\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") " pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.465749 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-mrh8v\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") " pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.465959 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-config-data\") pod \"nova-cell1-conductor-db-sync-mrh8v\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") " pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.466054 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-scripts\") pod \"nova-cell1-conductor-db-sync-mrh8v\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") " pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.466115 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-wskbn\" (UniqueName: \"kubernetes.io/projected/595f30cf-13ba-48bb-949d-393f11660091-kube-api-access-wskbn\") pod \"nova-cell1-conductor-db-sync-mrh8v\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") " pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.485991 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-config-data\") pod \"nova-cell1-conductor-db-sync-mrh8v\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") " pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.486487 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wskbn\" (UniqueName: \"kubernetes.io/projected/595f30cf-13ba-48bb-949d-393f11660091-kube-api-access-wskbn\") pod \"nova-cell1-conductor-db-sync-mrh8v\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") " pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.487550 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-scripts\") pod \"nova-cell1-conductor-db-sync-mrh8v\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") " pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.487985 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-mrh8v\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") " pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: W1210 00:57:28.616185 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77b234d2_981d_4198_a62b_3d0b68bde48a.slice/crio-54e50cc2906e579919222f5ad73f5628b046b7718c4c49ca7e218697db3ee7fe WatchSource:0}: Error finding container 54e50cc2906e579919222f5ad73f5628b046b7718c4c49ca7e218697db3ee7fe: Status 404 returned error can't find the container with id 54e50cc2906e579919222f5ad73f5628b046b7718c4c49ca7e218697db3ee7fe Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.619553 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.629718 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.678289 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-mrh8v" Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.700727 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"361cfe8a-f31e-49de-805e-f2d0b259d533","Type":"ContainerStarted","Data":"ee8843f078c92e549630f171534ccbfd87e34d77b0f344fe7ef243d03e8a8342"} Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.702789 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f7b3a947-00f5-41a8-98f7-e1c8571f8309","Type":"ContainerStarted","Data":"c08526694c46477e7d7d66d094ac479dca8470bca778f10fbf3c01413fd22c85"} Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.703833 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" event={"ID":"a1218ce1-2ed9-4191-9965-5c7c8ef9e842","Type":"ContainerStarted","Data":"ed2ac6de9929fbdf13c4615024f477211f102c709d2bd8475de1ba558b031edc"} Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.705714 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7dd09275-74e3-44a8-841c-25b0068a1457","Type":"ContainerStarted","Data":"8d03a79b2954bca614fbc08ed3c031943860a02c886c865801b3fc4ba080b25b"} Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.708243 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"77b234d2-981d-4198-a62b-3d0b68bde48a","Type":"ContainerStarted","Data":"54e50cc2906e579919222f5ad73f5628b046b7718c4c49ca7e218697db3ee7fe"} Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.709715 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2m7nt" event={"ID":"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3","Type":"ContainerStarted","Data":"def70ce602df51c6e48669ad5e4d78f29a59d87694800c6bb5dc62d31e3780b9"} Dec 10 00:57:28 crc kubenswrapper[4884]: I1210 00:57:28.733111 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-2m7nt" podStartSLOduration=2.733089907 podStartE2EDuration="2.733089907s" podCreationTimestamp="2025-12-10 00:57:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:57:28.726880969 +0000 UTC m=+1621.804838086" watchObservedRunningTime="2025-12-10 00:57:28.733089907 +0000 UTC m=+1621.811047024" Dec 10 00:57:29 crc kubenswrapper[4884]: I1210 00:57:29.728177 4884 generic.go:334] "Generic (PLEG): container finished" podID="a1218ce1-2ed9-4191-9965-5c7c8ef9e842" containerID="56a97e566bd52088cf2aa5f52aaa58f5e5de37c6ca138f38aef12405e03a8ba9" exitCode=0 Dec 10 00:57:29 crc kubenswrapper[4884]: I1210 00:57:29.728638 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" event={"ID":"a1218ce1-2ed9-4191-9965-5c7c8ef9e842","Type":"ContainerDied","Data":"56a97e566bd52088cf2aa5f52aaa58f5e5de37c6ca138f38aef12405e03a8ba9"} Dec 10 00:57:30 crc kubenswrapper[4884]: I1210 00:57:30.306825 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:57:30 crc kubenswrapper[4884]: I1210 00:57:30.332528 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 10 00:57:35 crc kubenswrapper[4884]: I1210 00:57:35.204131 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-mrh8v"] Dec 10 00:57:35 
crc kubenswrapper[4884]: I1210 00:57:35.629124 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-24s97" Dec 10 00:57:35 crc kubenswrapper[4884]: I1210 00:57:35.629176 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-24s97" Dec 10 00:57:35 crc kubenswrapper[4884]: I1210 00:57:35.678687 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-24s97" Dec 10 00:57:35 crc kubenswrapper[4884]: I1210 00:57:35.909405 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-24s97" Dec 10 00:57:36 crc kubenswrapper[4884]: I1210 00:57:36.014094 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-24s97"] Dec 10 00:57:36 crc kubenswrapper[4884]: I1210 00:57:36.068524 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dgn4r"] Dec 10 00:57:36 crc kubenswrapper[4884]: I1210 00:57:36.068788 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dgn4r" podUID="8078d872-21e8-4dfa-a907-5b3ce3759920" containerName="registry-server" containerID="cri-o://4a516347e5ca0a2ac822ca8d74260b0a399b34ea1ef69ec98fd72a3b8f0f8ed9" gracePeriod=2 Dec 10 00:57:36 crc kubenswrapper[4884]: W1210 00:57:36.525145 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod595f30cf_13ba_48bb_949d_393f11660091.slice/crio-b845831f13bfe9cdd4ae6d250fec3617f5eb7c78b7bbd5b063bda89a17da563b WatchSource:0}: Error finding container b845831f13bfe9cdd4ae6d250fec3617f5eb7c78b7bbd5b063bda89a17da563b: Status 404 returned error can't find the container with id b845831f13bfe9cdd4ae6d250fec3617f5eb7c78b7bbd5b063bda89a17da563b Dec 10 00:57:36 crc kubenswrapper[4884]: I1210 00:57:36.850990 4884 generic.go:334] "Generic (PLEG): container finished" podID="8078d872-21e8-4dfa-a907-5b3ce3759920" containerID="4a516347e5ca0a2ac822ca8d74260b0a399b34ea1ef69ec98fd72a3b8f0f8ed9" exitCode=0 Dec 10 00:57:36 crc kubenswrapper[4884]: I1210 00:57:36.851353 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dgn4r" event={"ID":"8078d872-21e8-4dfa-a907-5b3ce3759920","Type":"ContainerDied","Data":"4a516347e5ca0a2ac822ca8d74260b0a399b34ea1ef69ec98fd72a3b8f0f8ed9"} Dec 10 00:57:36 crc kubenswrapper[4884]: I1210 00:57:36.855695 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-mrh8v" event={"ID":"595f30cf-13ba-48bb-949d-393f11660091","Type":"ContainerStarted","Data":"b845831f13bfe9cdd4ae6d250fec3617f5eb7c78b7bbd5b063bda89a17da563b"} Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.214288 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.349540 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:57:37 crc kubenswrapper[4884]: E1210 00:57:37.350266 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.441249 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.574229 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8078d872-21e8-4dfa-a907-5b3ce3759920-catalog-content\") pod \"8078d872-21e8-4dfa-a907-5b3ce3759920\" (UID: \"8078d872-21e8-4dfa-a907-5b3ce3759920\") " Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.574441 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62kkf\" (UniqueName: \"kubernetes.io/projected/8078d872-21e8-4dfa-a907-5b3ce3759920-kube-api-access-62kkf\") pod \"8078d872-21e8-4dfa-a907-5b3ce3759920\" (UID: \"8078d872-21e8-4dfa-a907-5b3ce3759920\") " Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.574569 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8078d872-21e8-4dfa-a907-5b3ce3759920-utilities\") pod \"8078d872-21e8-4dfa-a907-5b3ce3759920\" (UID: \"8078d872-21e8-4dfa-a907-5b3ce3759920\") " Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.579327 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8078d872-21e8-4dfa-a907-5b3ce3759920-utilities" (OuterVolumeSpecName: "utilities") pod "8078d872-21e8-4dfa-a907-5b3ce3759920" (UID: "8078d872-21e8-4dfa-a907-5b3ce3759920"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.589016 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8078d872-21e8-4dfa-a907-5b3ce3759920-kube-api-access-62kkf" (OuterVolumeSpecName: "kube-api-access-62kkf") pod "8078d872-21e8-4dfa-a907-5b3ce3759920" (UID: "8078d872-21e8-4dfa-a907-5b3ce3759920"). InnerVolumeSpecName "kube-api-access-62kkf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.636751 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8078d872-21e8-4dfa-a907-5b3ce3759920-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8078d872-21e8-4dfa-a907-5b3ce3759920" (UID: "8078d872-21e8-4dfa-a907-5b3ce3759920"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.678083 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8078d872-21e8-4dfa-a907-5b3ce3759920-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.678111 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62kkf\" (UniqueName: \"kubernetes.io/projected/8078d872-21e8-4dfa-a907-5b3ce3759920-kube-api-access-62kkf\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.678124 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8078d872-21e8-4dfa-a907-5b3ce3759920-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.878109 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" event={"ID":"a1218ce1-2ed9-4191-9965-5c7c8ef9e842","Type":"ContainerStarted","Data":"72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6"} Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.878340 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.881876 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f7b3a947-00f5-41a8-98f7-e1c8571f8309","Type":"ContainerStarted","Data":"3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045"} Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.895653 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dgn4r" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.895653 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dgn4r" event={"ID":"8078d872-21e8-4dfa-a907-5b3ce3759920","Type":"ContainerDied","Data":"e26f9cc10c04d74cd2b0dd234f2b9bf9c32267fcfdf06567cbb59890065bdc26"} Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.896382 4884 scope.go:117] "RemoveContainer" containerID="4a516347e5ca0a2ac822ca8d74260b0a399b34ea1ef69ec98fd72a3b8f0f8ed9" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.908724 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="361cfe8a-f31e-49de-805e-f2d0b259d533" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82" gracePeriod=30 Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.939610 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" podStartSLOduration=11.939593818 podStartE2EDuration="11.939593818s" podCreationTimestamp="2025-12-10 00:57:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:57:37.910112309 +0000 UTC m=+1630.988069436" watchObservedRunningTime="2025-12-10 00:57:37.939593818 +0000 UTC m=+1631.017550935" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.963356 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.216911759 podStartE2EDuration="11.96333442s" 
podCreationTimestamp="2025-12-10 00:57:26 +0000 UTC" firstStartedPulling="2025-12-10 00:57:28.643167172 +0000 UTC m=+1621.721124289" lastFinishedPulling="2025-12-10 00:57:37.389589833 +0000 UTC m=+1630.467546950" observedRunningTime="2025-12-10 00:57:37.928859537 +0000 UTC m=+1631.006816674" watchObservedRunningTime="2025-12-10 00:57:37.96333442 +0000 UTC m=+1631.041291537" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.968623 4884 scope.go:117] "RemoveContainer" containerID="d1a493e23bf340e3bc2c7bc33f15c9444b49605791a83d8f235ab16172f3ffdf" Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.973521 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dgn4r"] Dec 10 00:57:37 crc kubenswrapper[4884]: I1210 00:57:37.986817 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dgn4r"] Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.020788 4884 scope.go:117] "RemoveContainer" containerID="57f270c5e11e72cb6c57882f74efe61073adbb1f920145be29d7adb6845b8617" Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.918292 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"77b234d2-981d-4198-a62b-3d0b68bde48a","Type":"ContainerStarted","Data":"738e0fb808ffe22cfac765cfb266fc72cc28270e1a4fb6784fc1dcd1ff93b524"} Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.923098 4884 generic.go:334] "Generic (PLEG): container finished" podID="ac5ac12f-a8ae-4710-a058-2eed2b61a0e3" containerID="def70ce602df51c6e48669ad5e4d78f29a59d87694800c6bb5dc62d31e3780b9" exitCode=0 Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.923202 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2m7nt" event={"ID":"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3","Type":"ContainerDied","Data":"def70ce602df51c6e48669ad5e4d78f29a59d87694800c6bb5dc62d31e3780b9"} Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.926497 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-mrh8v" event={"ID":"595f30cf-13ba-48bb-949d-393f11660091","Type":"ContainerStarted","Data":"1d164ad48e4dd6427611499a874a60b310b0ad64b7401cf5175ec98429159a81"} Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.932675 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"361cfe8a-f31e-49de-805e-f2d0b259d533","Type":"ContainerStarted","Data":"83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82"} Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.935227 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f7b3a947-00f5-41a8-98f7-e1c8571f8309","Type":"ContainerStarted","Data":"24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339"} Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.941271 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-5nqf7" event={"ID":"2068f8d7-d11e-4ae1-845b-6a542dfe62fe","Type":"ContainerStarted","Data":"a68414962cf847f04f21f34dcd1048893c60fc9263491ca6f386e0eb50c9e06b"} Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.946712 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=4.176712082 podStartE2EDuration="12.9466853s" podCreationTimestamp="2025-12-10 00:57:26 +0000 UTC" firstStartedPulling="2025-12-10 00:57:28.619798689 +0000 UTC m=+1621.697755806" 
lastFinishedPulling="2025-12-10 00:57:37.389771907 +0000 UTC m=+1630.467729024" observedRunningTime="2025-12-10 00:57:38.94298557 +0000 UTC m=+1632.020942697" watchObservedRunningTime="2025-12-10 00:57:38.9466853 +0000 UTC m=+1632.024642427" Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.952401 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7dd09275-74e3-44a8-841c-25b0068a1457","Type":"ContainerStarted","Data":"53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7"} Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.952670 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7dd09275-74e3-44a8-841c-25b0068a1457","Type":"ContainerStarted","Data":"a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee"} Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.952635 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7dd09275-74e3-44a8-841c-25b0068a1457" containerName="nova-metadata-metadata" containerID="cri-o://53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7" gracePeriod=30 Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.952423 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7dd09275-74e3-44a8-841c-25b0068a1457" containerName="nova-metadata-log" containerID="cri-o://a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee" gracePeriod=30 Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.974765 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-mrh8v" podStartSLOduration=10.97474978 podStartE2EDuration="10.97474978s" podCreationTimestamp="2025-12-10 00:57:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:57:38.966733593 +0000 UTC m=+1632.044690730" watchObservedRunningTime="2025-12-10 00:57:38.97474978 +0000 UTC m=+1632.052706897" Dec 10 00:57:38 crc kubenswrapper[4884]: I1210 00:57:38.988538 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.512531044 podStartE2EDuration="12.988521913s" podCreationTimestamp="2025-12-10 00:57:26 +0000 UTC" firstStartedPulling="2025-12-10 00:57:27.772646767 +0000 UTC m=+1620.850603884" lastFinishedPulling="2025-12-10 00:57:37.248637646 +0000 UTC m=+1630.326594753" observedRunningTime="2025-12-10 00:57:38.986656942 +0000 UTC m=+1632.064614059" watchObservedRunningTime="2025-12-10 00:57:38.988521913 +0000 UTC m=+1632.066479030" Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.019377 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-5nqf7" podStartSLOduration=4.658574037 podStartE2EDuration="17.019349468s" podCreationTimestamp="2025-12-10 00:57:22 +0000 UTC" firstStartedPulling="2025-12-10 00:57:24.848137569 +0000 UTC m=+1617.926094686" lastFinishedPulling="2025-12-10 00:57:37.208913 +0000 UTC m=+1630.286870117" observedRunningTime="2025-12-10 00:57:39.009957354 +0000 UTC m=+1632.087914471" watchObservedRunningTime="2025-12-10 00:57:39.019349468 +0000 UTC m=+1632.097306615" Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.063532 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.979570553 
podStartE2EDuration="13.063508694s" podCreationTimestamp="2025-12-10 00:57:26 +0000 UTC" firstStartedPulling="2025-12-10 00:57:28.231628137 +0000 UTC m=+1621.309585264" lastFinishedPulling="2025-12-10 00:57:37.315566288 +0000 UTC m=+1630.393523405" observedRunningTime="2025-12-10 00:57:39.045205338 +0000 UTC m=+1632.123162475" watchObservedRunningTime="2025-12-10 00:57:39.063508694 +0000 UTC m=+1632.141465811" Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.305960 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8078d872-21e8-4dfa-a907-5b3ce3759920" path="/var/lib/kubelet/pods/8078d872-21e8-4dfa-a907-5b3ce3759920/volumes" Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.664945 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.832747 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7dd09275-74e3-44a8-841c-25b0068a1457-logs\") pod \"7dd09275-74e3-44a8-841c-25b0068a1457\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.832827 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-combined-ca-bundle\") pod \"7dd09275-74e3-44a8-841c-25b0068a1457\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.832998 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4q4qn\" (UniqueName: \"kubernetes.io/projected/7dd09275-74e3-44a8-841c-25b0068a1457-kube-api-access-4q4qn\") pod \"7dd09275-74e3-44a8-841c-25b0068a1457\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.833114 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7dd09275-74e3-44a8-841c-25b0068a1457-logs" (OuterVolumeSpecName: "logs") pod "7dd09275-74e3-44a8-841c-25b0068a1457" (UID: "7dd09275-74e3-44a8-841c-25b0068a1457"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.833605 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-config-data\") pod \"7dd09275-74e3-44a8-841c-25b0068a1457\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.834211 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7dd09275-74e3-44a8-841c-25b0068a1457-logs\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:39 crc kubenswrapper[4884]: E1210 00:57:39.887812 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-combined-ca-bundle podName:7dd09275-74e3-44a8-841c-25b0068a1457 nodeName:}" failed. No retries permitted until 2025-12-10 00:57:40.387781306 +0000 UTC m=+1633.465738423 (durationBeforeRetry 500ms). 
Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-combined-ca-bundle") pod "7dd09275-74e3-44a8-841c-25b0068a1457" (UID: "7dd09275-74e3-44a8-841c-25b0068a1457") : error deleting /var/lib/kubelet/pods/7dd09275-74e3-44a8-841c-25b0068a1457/volume-subpaths: remove /var/lib/kubelet/pods/7dd09275-74e3-44a8-841c-25b0068a1457/volume-subpaths: no such file or directory Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.887944 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7dd09275-74e3-44a8-841c-25b0068a1457-kube-api-access-4q4qn" (OuterVolumeSpecName: "kube-api-access-4q4qn") pod "7dd09275-74e3-44a8-841c-25b0068a1457" (UID: "7dd09275-74e3-44a8-841c-25b0068a1457"). InnerVolumeSpecName "kube-api-access-4q4qn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.891574 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-config-data" (OuterVolumeSpecName: "config-data") pod "7dd09275-74e3-44a8-841c-25b0068a1457" (UID: "7dd09275-74e3-44a8-841c-25b0068a1457"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.935602 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4q4qn\" (UniqueName: \"kubernetes.io/projected/7dd09275-74e3-44a8-841c-25b0068a1457-kube-api-access-4q4qn\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.935636 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.962560 4884 generic.go:334] "Generic (PLEG): container finished" podID="7dd09275-74e3-44a8-841c-25b0068a1457" containerID="53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7" exitCode=0 Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.962600 4884 generic.go:334] "Generic (PLEG): container finished" podID="7dd09275-74e3-44a8-841c-25b0068a1457" containerID="a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee" exitCode=143 Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.962614 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.962705 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7dd09275-74e3-44a8-841c-25b0068a1457","Type":"ContainerDied","Data":"53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7"} Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.962779 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7dd09275-74e3-44a8-841c-25b0068a1457","Type":"ContainerDied","Data":"a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee"} Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.962798 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7dd09275-74e3-44a8-841c-25b0068a1457","Type":"ContainerDied","Data":"8d03a79b2954bca614fbc08ed3c031943860a02c886c865801b3fc4ba080b25b"} Dec 10 00:57:39 crc kubenswrapper[4884]: I1210 00:57:39.962818 4884 scope.go:117] "RemoveContainer" containerID="53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.003306 4884 scope.go:117] "RemoveContainer" containerID="a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.022199 4884 scope.go:117] "RemoveContainer" containerID="53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7" Dec 10 00:57:40 crc kubenswrapper[4884]: E1210 00:57:40.022721 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7\": container with ID starting with 53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7 not found: ID does not exist" containerID="53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.022766 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7"} err="failed to get container status \"53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7\": rpc error: code = NotFound desc = could not find container \"53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7\": container with ID starting with 53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7 not found: ID does not exist" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.022798 4884 scope.go:117] "RemoveContainer" containerID="a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee" Dec 10 00:57:40 crc kubenswrapper[4884]: E1210 00:57:40.023186 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee\": container with ID starting with a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee not found: ID does not exist" containerID="a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.023232 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee"} err="failed to get container status \"a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee\": rpc error: code = 
NotFound desc = could not find container \"a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee\": container with ID starting with a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee not found: ID does not exist" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.023259 4884 scope.go:117] "RemoveContainer" containerID="53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.023602 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7"} err="failed to get container status \"53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7\": rpc error: code = NotFound desc = could not find container \"53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7\": container with ID starting with 53be607033685eb484e690e2ea86e8cee639c893b3409fdfb49f957a1c040aa7 not found: ID does not exist" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.023638 4884 scope.go:117] "RemoveContainer" containerID="a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.023892 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee"} err="failed to get container status \"a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee\": rpc error: code = NotFound desc = could not find container \"a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee\": container with ID starting with a5dcd903b14aeceb9353a288d0a21500de2a4da496a0a4020c7504506ccdd3ee not found: ID does not exist" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.284969 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2m7nt" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.445403 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-combined-ca-bundle\") pod \"7dd09275-74e3-44a8-841c-25b0068a1457\" (UID: \"7dd09275-74e3-44a8-841c-25b0068a1457\") " Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.446009 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-scripts\") pod \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.446054 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-combined-ca-bundle\") pod \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.446113 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-config-data\") pod \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.446144 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xs7tx\" (UniqueName: \"kubernetes.io/projected/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-kube-api-access-xs7tx\") pod \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\" (UID: \"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3\") " Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.449912 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7dd09275-74e3-44a8-841c-25b0068a1457" (UID: "7dd09275-74e3-44a8-841c-25b0068a1457"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.453675 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-scripts" (OuterVolumeSpecName: "scripts") pod "ac5ac12f-a8ae-4710-a058-2eed2b61a0e3" (UID: "ac5ac12f-a8ae-4710-a058-2eed2b61a0e3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.454167 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-kube-api-access-xs7tx" (OuterVolumeSpecName: "kube-api-access-xs7tx") pod "ac5ac12f-a8ae-4710-a058-2eed2b61a0e3" (UID: "ac5ac12f-a8ae-4710-a058-2eed2b61a0e3"). InnerVolumeSpecName "kube-api-access-xs7tx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.477083 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ac5ac12f-a8ae-4710-a058-2eed2b61a0e3" (UID: "ac5ac12f-a8ae-4710-a058-2eed2b61a0e3"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.478649 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-config-data" (OuterVolumeSpecName: "config-data") pod "ac5ac12f-a8ae-4710-a058-2eed2b61a0e3" (UID: "ac5ac12f-a8ae-4710-a058-2eed2b61a0e3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.548776 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.548808 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.548818 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.548829 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xs7tx\" (UniqueName: \"kubernetes.io/projected/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3-kube-api-access-xs7tx\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.548838 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7dd09275-74e3-44a8-841c-25b0068a1457-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.623060 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.631087 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.682126 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:57:40 crc kubenswrapper[4884]: E1210 00:57:40.682935 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5ac12f-a8ae-4710-a058-2eed2b61a0e3" containerName="nova-manage" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.682954 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5ac12f-a8ae-4710-a058-2eed2b61a0e3" containerName="nova-manage" Dec 10 00:57:40 crc kubenswrapper[4884]: E1210 00:57:40.682971 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8078d872-21e8-4dfa-a907-5b3ce3759920" containerName="registry-server" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.682978 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8078d872-21e8-4dfa-a907-5b3ce3759920" containerName="registry-server" Dec 10 00:57:40 crc kubenswrapper[4884]: E1210 00:57:40.683004 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dd09275-74e3-44a8-841c-25b0068a1457" containerName="nova-metadata-log" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.683011 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dd09275-74e3-44a8-841c-25b0068a1457" containerName="nova-metadata-log" Dec 10 00:57:40 crc kubenswrapper[4884]: E1210 
00:57:40.683035 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8078d872-21e8-4dfa-a907-5b3ce3759920" containerName="extract-utilities" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.683041 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8078d872-21e8-4dfa-a907-5b3ce3759920" containerName="extract-utilities" Dec 10 00:57:40 crc kubenswrapper[4884]: E1210 00:57:40.683068 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dd09275-74e3-44a8-841c-25b0068a1457" containerName="nova-metadata-metadata" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.683075 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dd09275-74e3-44a8-841c-25b0068a1457" containerName="nova-metadata-metadata" Dec 10 00:57:40 crc kubenswrapper[4884]: E1210 00:57:40.683086 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8078d872-21e8-4dfa-a907-5b3ce3759920" containerName="extract-content" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.683091 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8078d872-21e8-4dfa-a907-5b3ce3759920" containerName="extract-content" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.683476 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dd09275-74e3-44a8-841c-25b0068a1457" containerName="nova-metadata-log" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.683505 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8078d872-21e8-4dfa-a907-5b3ce3759920" containerName="registry-server" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.683522 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5ac12f-a8ae-4710-a058-2eed2b61a0e3" containerName="nova-manage" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.683554 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dd09275-74e3-44a8-841c-25b0068a1457" containerName="nova-metadata-metadata" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.685572 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.688167 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.690930 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.699987 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.864066 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-config-data\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.864218 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.864267 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.864315 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qjxr\" (UniqueName: \"kubernetes.io/projected/502dbb40-c907-4f1f-b590-565e665bf0fc-kube-api-access-6qjxr\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.864385 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/502dbb40-c907-4f1f-b590-565e665bf0fc-logs\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.966731 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.968399 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qjxr\" (UniqueName: \"kubernetes.io/projected/502dbb40-c907-4f1f-b590-565e665bf0fc-kube-api-access-6qjxr\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.968757 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/502dbb40-c907-4f1f-b590-565e665bf0fc-logs\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " 
pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.969046 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-config-data\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.969482 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/502dbb40-c907-4f1f-b590-565e665bf0fc-logs\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.969660 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.977293 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.991146 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.996363 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qjxr\" (UniqueName: \"kubernetes.io/projected/502dbb40-c907-4f1f-b590-565e665bf0fc-kube-api-access-6qjxr\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:40 crc kubenswrapper[4884]: I1210 00:57:40.996622 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-config-data\") pod \"nova-metadata-0\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " pod="openstack/nova-metadata-0" Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.006192 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-2m7nt" event={"ID":"ac5ac12f-a8ae-4710-a058-2eed2b61a0e3","Type":"ContainerDied","Data":"b2bb41b7be07e93db6085d34fe9846322221af042b90e4a4a4e029a5d0ee98ad"} Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.006456 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2bb41b7be07e93db6085d34fe9846322221af042b90e4a4a4e029a5d0ee98ad" Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.006720 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-2m7nt" Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.009583 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.016646 4884 generic.go:334] "Generic (PLEG): container finished" podID="2068f8d7-d11e-4ae1-845b-6a542dfe62fe" containerID="a68414962cf847f04f21f34dcd1048893c60fc9263491ca6f386e0eb50c9e06b" exitCode=0 Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.016718 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-5nqf7" event={"ID":"2068f8d7-d11e-4ae1-845b-6a542dfe62fe","Type":"ContainerDied","Data":"a68414962cf847f04f21f34dcd1048893c60fc9263491ca6f386e0eb50c9e06b"} Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.246382 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.246906 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f7b3a947-00f5-41a8-98f7-e1c8571f8309" containerName="nova-api-log" containerID="cri-o://3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045" gracePeriod=30 Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.247059 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f7b3a947-00f5-41a8-98f7-e1c8571f8309" containerName="nova-api-api" containerID="cri-o://24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339" gracePeriod=30 Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.261917 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.262117 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="77b234d2-981d-4198-a62b-3d0b68bde48a" containerName="nova-scheduler-scheduler" containerID="cri-o://738e0fb808ffe22cfac765cfb266fc72cc28270e1a4fb6784fc1dcd1ff93b524" gracePeriod=30 Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.271132 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.299696 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7dd09275-74e3-44a8-841c-25b0068a1457" path="/var/lib/kubelet/pods/7dd09275-74e3-44a8-841c-25b0068a1457/volumes" Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.367557 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:57:41 crc kubenswrapper[4884]: I1210 00:57:41.941975 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.030968 4884 generic.go:334] "Generic (PLEG): container finished" podID="77b234d2-981d-4198-a62b-3d0b68bde48a" containerID="738e0fb808ffe22cfac765cfb266fc72cc28270e1a4fb6784fc1dcd1ff93b524" exitCode=0 Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.031214 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"77b234d2-981d-4198-a62b-3d0b68bde48a","Type":"ContainerDied","Data":"738e0fb808ffe22cfac765cfb266fc72cc28270e1a4fb6784fc1dcd1ff93b524"} Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.033686 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"502dbb40-c907-4f1f-b590-565e665bf0fc","Type":"ContainerStarted","Data":"5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675"} Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.033722 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"502dbb40-c907-4f1f-b590-565e665bf0fc","Type":"ContainerStarted","Data":"b3a5123a57750dfacd3e448554324a95b65852bd906dc6fe5843136119479902"} Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.033738 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"502dbb40-c907-4f1f-b590-565e665bf0fc","Type":"ContainerStarted","Data":"90ef9935407730e9a5a131865120042c0a4fc43604b91839f4b3ba104f336c5a"} Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.033912 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="502dbb40-c907-4f1f-b590-565e665bf0fc" containerName="nova-metadata-log" containerID="cri-o://b3a5123a57750dfacd3e448554324a95b65852bd906dc6fe5843136119479902" gracePeriod=30 Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.034040 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="502dbb40-c907-4f1f-b590-565e665bf0fc" containerName="nova-metadata-metadata" containerID="cri-o://5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675" gracePeriod=30 Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.038693 4884 generic.go:334] "Generic (PLEG): container finished" podID="f7b3a947-00f5-41a8-98f7-e1c8571f8309" containerID="24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339" exitCode=0 Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.038723 4884 generic.go:334] "Generic (PLEG): container finished" podID="f7b3a947-00f5-41a8-98f7-e1c8571f8309" containerID="3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045" exitCode=143 Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.038889 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.039352 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f7b3a947-00f5-41a8-98f7-e1c8571f8309","Type":"ContainerDied","Data":"24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339"} Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.039382 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f7b3a947-00f5-41a8-98f7-e1c8571f8309","Type":"ContainerDied","Data":"3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045"} Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.039394 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f7b3a947-00f5-41a8-98f7-e1c8571f8309","Type":"ContainerDied","Data":"c08526694c46477e7d7d66d094ac479dca8470bca778f10fbf3c01413fd22c85"} Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.039408 4884 scope.go:117] "RemoveContainer" containerID="24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.075342 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.075308536 podStartE2EDuration="2.075308536s" podCreationTimestamp="2025-12-10 00:57:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:57:42.060007471 +0000 UTC m=+1635.137964598" watchObservedRunningTime="2025-12-10 00:57:42.075308536 +0000 UTC m=+1635.153265653" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.091711 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p7ncl\" (UniqueName: \"kubernetes.io/projected/f7b3a947-00f5-41a8-98f7-e1c8571f8309-kube-api-access-p7ncl\") pod \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.092231 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b3a947-00f5-41a8-98f7-e1c8571f8309-config-data\") pod \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.092496 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f7b3a947-00f5-41a8-98f7-e1c8571f8309-logs\") pod \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.092859 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b3a947-00f5-41a8-98f7-e1c8571f8309-combined-ca-bundle\") pod \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\" (UID: \"f7b3a947-00f5-41a8-98f7-e1c8571f8309\") " Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.095139 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7b3a947-00f5-41a8-98f7-e1c8571f8309-logs" (OuterVolumeSpecName: "logs") pod "f7b3a947-00f5-41a8-98f7-e1c8571f8309" (UID: "f7b3a947-00f5-41a8-98f7-e1c8571f8309"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.101178 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7b3a947-00f5-41a8-98f7-e1c8571f8309-kube-api-access-p7ncl" (OuterVolumeSpecName: "kube-api-access-p7ncl") pod "f7b3a947-00f5-41a8-98f7-e1c8571f8309" (UID: "f7b3a947-00f5-41a8-98f7-e1c8571f8309"). InnerVolumeSpecName "kube-api-access-p7ncl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.101525 4884 scope.go:117] "RemoveContainer" containerID="3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.138675 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b3a947-00f5-41a8-98f7-e1c8571f8309-config-data" (OuterVolumeSpecName: "config-data") pod "f7b3a947-00f5-41a8-98f7-e1c8571f8309" (UID: "f7b3a947-00f5-41a8-98f7-e1c8571f8309"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.143944 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b3a947-00f5-41a8-98f7-e1c8571f8309-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f7b3a947-00f5-41a8-98f7-e1c8571f8309" (UID: "f7b3a947-00f5-41a8-98f7-e1c8571f8309"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.157861 4884 scope.go:117] "RemoveContainer" containerID="24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339" Dec 10 00:57:42 crc kubenswrapper[4884]: E1210 00:57:42.165988 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339\": container with ID starting with 24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339 not found: ID does not exist" containerID="24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.166029 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339"} err="failed to get container status \"24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339\": rpc error: code = NotFound desc = could not find container \"24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339\": container with ID starting with 24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339 not found: ID does not exist" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.166054 4884 scope.go:117] "RemoveContainer" containerID="3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045" Dec 10 00:57:42 crc kubenswrapper[4884]: E1210 00:57:42.166993 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045\": container with ID starting with 3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045 not found: ID does not exist" containerID="3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.167022 4884 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045"} err="failed to get container status \"3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045\": rpc error: code = NotFound desc = could not find container \"3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045\": container with ID starting with 3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045 not found: ID does not exist" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.167039 4884 scope.go:117] "RemoveContainer" containerID="24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.167348 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339"} err="failed to get container status \"24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339\": rpc error: code = NotFound desc = could not find container \"24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339\": container with ID starting with 24f7002d24bacb99f3ac4328235248cc0b23bceabe1ee8fcd8650dce1ed0f339 not found: ID does not exist" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.167371 4884 scope.go:117] "RemoveContainer" containerID="3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.167627 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045"} err="failed to get container status \"3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045\": rpc error: code = NotFound desc = could not find container \"3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045\": container with ID starting with 3f1b694452dc212d0cab00b5cb935ea9c3a206a9de52a7ce686a2e4f37be7045 not found: ID does not exist" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.195933 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p7ncl\" (UniqueName: \"kubernetes.io/projected/f7b3a947-00f5-41a8-98f7-e1c8571f8309-kube-api-access-p7ncl\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.196165 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b3a947-00f5-41a8-98f7-e1c8571f8309-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.196243 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f7b3a947-00f5-41a8-98f7-e1c8571f8309-logs\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.196347 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b3a947-00f5-41a8-98f7-e1c8571f8309-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.215675 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.286677 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.298146 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77b234d2-981d-4198-a62b-3d0b68bde48a-config-data\") pod \"77b234d2-981d-4198-a62b-3d0b68bde48a\" (UID: \"77b234d2-981d-4198-a62b-3d0b68bde48a\") " Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.298507 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77b234d2-981d-4198-a62b-3d0b68bde48a-combined-ca-bundle\") pod \"77b234d2-981d-4198-a62b-3d0b68bde48a\" (UID: \"77b234d2-981d-4198-a62b-3d0b68bde48a\") " Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.298618 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmxdm\" (UniqueName: \"kubernetes.io/projected/77b234d2-981d-4198-a62b-3d0b68bde48a-kube-api-access-zmxdm\") pod \"77b234d2-981d-4198-a62b-3d0b68bde48a\" (UID: \"77b234d2-981d-4198-a62b-3d0b68bde48a\") " Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.303303 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77b234d2-981d-4198-a62b-3d0b68bde48a-kube-api-access-zmxdm" (OuterVolumeSpecName: "kube-api-access-zmxdm") pod "77b234d2-981d-4198-a62b-3d0b68bde48a" (UID: "77b234d2-981d-4198-a62b-3d0b68bde48a"). InnerVolumeSpecName "kube-api-access-zmxdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.308802 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.349857 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77b234d2-981d-4198-a62b-3d0b68bde48a-config-data" (OuterVolumeSpecName: "config-data") pod "77b234d2-981d-4198-a62b-3d0b68bde48a" (UID: "77b234d2-981d-4198-a62b-3d0b68bde48a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.377053 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77b234d2-981d-4198-a62b-3d0b68bde48a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77b234d2-981d-4198-a62b-3d0b68bde48a" (UID: "77b234d2-981d-4198-a62b-3d0b68bde48a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.393011 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-d5kx9"] Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.393253 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" podUID="966092e4-bb27-4e13-97e1-46f55c562a7f" containerName="dnsmasq-dns" containerID="cri-o://db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09" gracePeriod=10 Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.402154 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77b234d2-981d-4198-a62b-3d0b68bde48a-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.402628 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77b234d2-981d-4198-a62b-3d0b68bde48a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.402734 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmxdm\" (UniqueName: \"kubernetes.io/projected/77b234d2-981d-4198-a62b-3d0b68bde48a-kube-api-access-zmxdm\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.430790 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.450936 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.468527 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 10 00:57:42 crc kubenswrapper[4884]: E1210 00:57:42.469189 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77b234d2-981d-4198-a62b-3d0b68bde48a" containerName="nova-scheduler-scheduler" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.469277 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="77b234d2-981d-4198-a62b-3d0b68bde48a" containerName="nova-scheduler-scheduler" Dec 10 00:57:42 crc kubenswrapper[4884]: E1210 00:57:42.469366 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b3a947-00f5-41a8-98f7-e1c8571f8309" containerName="nova-api-log" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.469446 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b3a947-00f5-41a8-98f7-e1c8571f8309" containerName="nova-api-log" Dec 10 00:57:42 crc kubenswrapper[4884]: E1210 00:57:42.469524 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b3a947-00f5-41a8-98f7-e1c8571f8309" containerName="nova-api-api" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.469576 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b3a947-00f5-41a8-98f7-e1c8571f8309" containerName="nova-api-api" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.469841 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="77b234d2-981d-4198-a62b-3d0b68bde48a" containerName="nova-scheduler-scheduler" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.469927 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7b3a947-00f5-41a8-98f7-e1c8571f8309" containerName="nova-api-api" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.469985 4884 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="f7b3a947-00f5-41a8-98f7-e1c8571f8309" containerName="nova-api-log" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.474051 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.477270 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.485682 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.498539 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-5nqf7" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.609511 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-combined-ca-bundle\") pod \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.609735 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-scripts\") pod \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.609775 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-config-data\") pod \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.609809 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnqst\" (UniqueName: \"kubernetes.io/projected/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-kube-api-access-gnqst\") pod \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\" (UID: \"2068f8d7-d11e-4ae1-845b-6a542dfe62fe\") " Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.611173 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/438ed569-d56e-4c42-ae94-dac8b30dc2fc-config-data\") pod \"nova-api-0\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") " pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.611516 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438ed569-d56e-4c42-ae94-dac8b30dc2fc-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") " pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.620841 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fv28f\" (UniqueName: \"kubernetes.io/projected/438ed569-d56e-4c42-ae94-dac8b30dc2fc-kube-api-access-fv28f\") pod \"nova-api-0\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") " pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.621090 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/438ed569-d56e-4c42-ae94-dac8b30dc2fc-logs\") pod \"nova-api-0\" (UID: 
\"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") " pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.641743 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-scripts" (OuterVolumeSpecName: "scripts") pod "2068f8d7-d11e-4ae1-845b-6a542dfe62fe" (UID: "2068f8d7-d11e-4ae1-845b-6a542dfe62fe"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.656666 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-kube-api-access-gnqst" (OuterVolumeSpecName: "kube-api-access-gnqst") pod "2068f8d7-d11e-4ae1-845b-6a542dfe62fe" (UID: "2068f8d7-d11e-4ae1-845b-6a542dfe62fe"). InnerVolumeSpecName "kube-api-access-gnqst". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.676567 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2068f8d7-d11e-4ae1-845b-6a542dfe62fe" (UID: "2068f8d7-d11e-4ae1-845b-6a542dfe62fe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.722615 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438ed569-d56e-4c42-ae94-dac8b30dc2fc-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") " pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.722691 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fv28f\" (UniqueName: \"kubernetes.io/projected/438ed569-d56e-4c42-ae94-dac8b30dc2fc-kube-api-access-fv28f\") pod \"nova-api-0\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") " pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.722739 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/438ed569-d56e-4c42-ae94-dac8b30dc2fc-logs\") pod \"nova-api-0\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") " pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.722799 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/438ed569-d56e-4c42-ae94-dac8b30dc2fc-config-data\") pod \"nova-api-0\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") " pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.722844 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.722856 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.722866 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnqst\" (UniqueName: 
\"kubernetes.io/projected/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-kube-api-access-gnqst\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.725111 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-config-data" (OuterVolumeSpecName: "config-data") pod "2068f8d7-d11e-4ae1-845b-6a542dfe62fe" (UID: "2068f8d7-d11e-4ae1-845b-6a542dfe62fe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.725682 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/438ed569-d56e-4c42-ae94-dac8b30dc2fc-logs\") pod \"nova-api-0\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") " pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.748256 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/438ed569-d56e-4c42-ae94-dac8b30dc2fc-config-data\") pod \"nova-api-0\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") " pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.748262 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fv28f\" (UniqueName: \"kubernetes.io/projected/438ed569-d56e-4c42-ae94-dac8b30dc2fc-kube-api-access-fv28f\") pod \"nova-api-0\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") " pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.749849 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438ed569-d56e-4c42-ae94-dac8b30dc2fc-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") " pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.859095 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.861119 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2068f8d7-d11e-4ae1-845b-6a542dfe62fe-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:42 crc kubenswrapper[4884]: I1210 00:57:42.961079 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.064291 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-dns-svc\") pod \"966092e4-bb27-4e13-97e1-46f55c562a7f\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") "
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.064765 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-dns-swift-storage-0\") pod \"966092e4-bb27-4e13-97e1-46f55c562a7f\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") "
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.064839 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-ovsdbserver-sb\") pod \"966092e4-bb27-4e13-97e1-46f55c562a7f\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") "
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.064976 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-ovsdbserver-nb\") pod \"966092e4-bb27-4e13-97e1-46f55c562a7f\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") "
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.065062 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2hmp\" (UniqueName: \"kubernetes.io/projected/966092e4-bb27-4e13-97e1-46f55c562a7f-kube-api-access-d2hmp\") pod \"966092e4-bb27-4e13-97e1-46f55c562a7f\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") "
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.065102 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-config\") pod \"966092e4-bb27-4e13-97e1-46f55c562a7f\" (UID: \"966092e4-bb27-4e13-97e1-46f55c562a7f\") "
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.083218 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/966092e4-bb27-4e13-97e1-46f55c562a7f-kube-api-access-d2hmp" (OuterVolumeSpecName: "kube-api-access-d2hmp") pod "966092e4-bb27-4e13-97e1-46f55c562a7f" (UID: "966092e4-bb27-4e13-97e1-46f55c562a7f"). InnerVolumeSpecName "kube-api-access-d2hmp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.088853 4884 generic.go:334] "Generic (PLEG): container finished" podID="966092e4-bb27-4e13-97e1-46f55c562a7f" containerID="db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09" exitCode=0
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.088936 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.088956 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" event={"ID":"966092e4-bb27-4e13-97e1-46f55c562a7f","Type":"ContainerDied","Data":"db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09"}
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.088986 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-d5kx9" event={"ID":"966092e4-bb27-4e13-97e1-46f55c562a7f","Type":"ContainerDied","Data":"6882f0fc5675004a5153bccf724fb4554f37aa038a1fcd804b2fd950133f6c70"}
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.089006 4884 scope.go:117] "RemoveContainer" containerID="db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.109349 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-5nqf7" event={"ID":"2068f8d7-d11e-4ae1-845b-6a542dfe62fe","Type":"ContainerDied","Data":"a6bd76a1ea7da9008ebab0c4d8b84cd5eede51e8e73cec4049d517e9781c6b8d"}
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.109399 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6bd76a1ea7da9008ebab0c4d8b84cd5eede51e8e73cec4049d517e9781c6b8d"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.109749 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-5nqf7"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.117542 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"77b234d2-981d-4198-a62b-3d0b68bde48a","Type":"ContainerDied","Data":"54e50cc2906e579919222f5ad73f5628b046b7718c4c49ca7e218697db3ee7fe"}
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.117644 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.137402 4884 generic.go:334] "Generic (PLEG): container finished" podID="502dbb40-c907-4f1f-b590-565e665bf0fc" containerID="b3a5123a57750dfacd3e448554324a95b65852bd906dc6fe5843136119479902" exitCode=143
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.137491 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"502dbb40-c907-4f1f-b590-565e665bf0fc","Type":"ContainerDied","Data":"b3a5123a57750dfacd3e448554324a95b65852bd906dc6fe5843136119479902"}
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.145820 4884 scope.go:117] "RemoveContainer" containerID="9c89aab72737d2173dcd213a3e2c5e5a26d262bc0e3af2013f7ca268400878fd"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.146286 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-config" (OuterVolumeSpecName: "config") pod "966092e4-bb27-4e13-97e1-46f55c562a7f" (UID: "966092e4-bb27-4e13-97e1-46f55c562a7f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.168891 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "966092e4-bb27-4e13-97e1-46f55c562a7f" (UID: "966092e4-bb27-4e13-97e1-46f55c562a7f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.171749 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-config\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.171772 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.171784 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2hmp\" (UniqueName: \"kubernetes.io/projected/966092e4-bb27-4e13-97e1-46f55c562a7f-kube-api-access-d2hmp\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.191787 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "966092e4-bb27-4e13-97e1-46f55c562a7f" (UID: "966092e4-bb27-4e13-97e1-46f55c562a7f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.194376 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "966092e4-bb27-4e13-97e1-46f55c562a7f" (UID: "966092e4-bb27-4e13-97e1-46f55c562a7f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.197047 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.200623 4884 scope.go:117] "RemoveContainer" containerID="db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09"
Dec 10 00:57:43 crc kubenswrapper[4884]: E1210 00:57:43.201289 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09\": container with ID starting with db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09 not found: ID does not exist" containerID="db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.201317 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09"} err="failed to get container status \"db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09\": rpc error: code = NotFound desc = could not find container \"db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09\": container with ID starting with db05149c7412abb648499445685f07fe5bfa669eeabea7859726d21b70d0fc09 not found: ID does not exist"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.201338 4884 scope.go:117] "RemoveContainer" containerID="9c89aab72737d2173dcd213a3e2c5e5a26d262bc0e3af2013f7ca268400878fd"
Dec 10 00:57:43 crc kubenswrapper[4884]: E1210 00:57:43.201763 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c89aab72737d2173dcd213a3e2c5e5a26d262bc0e3af2013f7ca268400878fd\": container with ID starting with 9c89aab72737d2173dcd213a3e2c5e5a26d262bc0e3af2013f7ca268400878fd not found: ID does not exist" containerID="9c89aab72737d2173dcd213a3e2c5e5a26d262bc0e3af2013f7ca268400878fd"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.201777 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c89aab72737d2173dcd213a3e2c5e5a26d262bc0e3af2013f7ca268400878fd"} err="failed to get container status \"9c89aab72737d2173dcd213a3e2c5e5a26d262bc0e3af2013f7ca268400878fd\": rpc error: code = NotFound desc = could not find container \"9c89aab72737d2173dcd213a3e2c5e5a26d262bc0e3af2013f7ca268400878fd\": container with ID starting with 9c89aab72737d2173dcd213a3e2c5e5a26d262bc0e3af2013f7ca268400878fd not found: ID does not exist"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.201796 4884 scope.go:117] "RemoveContainer" containerID="738e0fb808ffe22cfac765cfb266fc72cc28270e1a4fb6784fc1dcd1ff93b524"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.202077 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "966092e4-bb27-4e13-97e1-46f55c562a7f" (UID: "966092e4-bb27-4e13-97e1-46f55c562a7f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.228488 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.250349 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Dec 10 00:57:43 crc kubenswrapper[4884]: E1210 00:57:43.251914 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="966092e4-bb27-4e13-97e1-46f55c562a7f" containerName="init"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.251931 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="966092e4-bb27-4e13-97e1-46f55c562a7f" containerName="init"
Dec 10 00:57:43 crc kubenswrapper[4884]: E1210 00:57:43.251943 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="966092e4-bb27-4e13-97e1-46f55c562a7f" containerName="dnsmasq-dns"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.251949 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="966092e4-bb27-4e13-97e1-46f55c562a7f" containerName="dnsmasq-dns"
Dec 10 00:57:43 crc kubenswrapper[4884]: E1210 00:57:43.252394 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2068f8d7-d11e-4ae1-845b-6a542dfe62fe" containerName="aodh-db-sync"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.252408 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2068f8d7-d11e-4ae1-845b-6a542dfe62fe" containerName="aodh-db-sync"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.252814 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="966092e4-bb27-4e13-97e1-46f55c562a7f" containerName="dnsmasq-dns"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.252830 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2068f8d7-d11e-4ae1-845b-6a542dfe62fe" containerName="aodh-db-sync"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.254698 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.256741 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.273128 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.274365 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.274390 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.274401 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/966092e4-bb27-4e13-97e1-46f55c562a7f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.301765 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77b234d2-981d-4198-a62b-3d0b68bde48a" path="/var/lib/kubelet/pods/77b234d2-981d-4198-a62b-3d0b68bde48a/volumes"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.302350 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7b3a947-00f5-41a8-98f7-e1c8571f8309" path="/var/lib/kubelet/pods/f7b3a947-00f5-41a8-98f7-e1c8571f8309/volumes"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.376017 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbklt\" (UniqueName: \"kubernetes.io/projected/5664a663-e831-4d21-a144-02dfe91bf470-kube-api-access-rbklt\") pod \"nova-scheduler-0\" (UID: \"5664a663-e831-4d21-a144-02dfe91bf470\") " pod="openstack/nova-scheduler-0"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.378357 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5664a663-e831-4d21-a144-02dfe91bf470-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5664a663-e831-4d21-a144-02dfe91bf470\") " pod="openstack/nova-scheduler-0"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.378426 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5664a663-e831-4d21-a144-02dfe91bf470-config-data\") pod \"nova-scheduler-0\" (UID: \"5664a663-e831-4d21-a144-02dfe91bf470\") " pod="openstack/nova-scheduler-0"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.386931 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Dec 10 00:57:43 crc kubenswrapper[4884]: W1210 00:57:43.392585 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod438ed569_d56e_4c42_ae94_dac8b30dc2fc.slice/crio-4143cc5724ba885d8d126a002bd75934530d7f846e0500d2a9dbfc0e6fad20af WatchSource:0}: Error finding container 4143cc5724ba885d8d126a002bd75934530d7f846e0500d2a9dbfc0e6fad20af: Status 404 returned error can't find the container with id 4143cc5724ba885d8d126a002bd75934530d7f846e0500d2a9dbfc0e6fad20af
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.431270 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-d5kx9"]
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.444682 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-d5kx9"]
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.480978 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbklt\" (UniqueName: \"kubernetes.io/projected/5664a663-e831-4d21-a144-02dfe91bf470-kube-api-access-rbklt\") pod \"nova-scheduler-0\" (UID: \"5664a663-e831-4d21-a144-02dfe91bf470\") " pod="openstack/nova-scheduler-0"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.481905 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5664a663-e831-4d21-a144-02dfe91bf470-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5664a663-e831-4d21-a144-02dfe91bf470\") " pod="openstack/nova-scheduler-0"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.481952 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5664a663-e831-4d21-a144-02dfe91bf470-config-data\") pod \"nova-scheduler-0\" (UID: \"5664a663-e831-4d21-a144-02dfe91bf470\") " pod="openstack/nova-scheduler-0"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.486633 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5664a663-e831-4d21-a144-02dfe91bf470-config-data\") pod \"nova-scheduler-0\" (UID: \"5664a663-e831-4d21-a144-02dfe91bf470\") " pod="openstack/nova-scheduler-0"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.487351 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5664a663-e831-4d21-a144-02dfe91bf470-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"5664a663-e831-4d21-a144-02dfe91bf470\") " pod="openstack/nova-scheduler-0"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.502068 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbklt\" (UniqueName: \"kubernetes.io/projected/5664a663-e831-4d21-a144-02dfe91bf470-kube-api-access-rbklt\") pod \"nova-scheduler-0\" (UID: \"5664a663-e831-4d21-a144-02dfe91bf470\") " pod="openstack/nova-scheduler-0"
Dec 10 00:57:43 crc kubenswrapper[4884]: I1210 00:57:43.576628 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 10 00:57:44 crc kubenswrapper[4884]: I1210 00:57:44.151277 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"438ed569-d56e-4c42-ae94-dac8b30dc2fc","Type":"ContainerStarted","Data":"18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2"}
Dec 10 00:57:44 crc kubenswrapper[4884]: I1210 00:57:44.151928 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"438ed569-d56e-4c42-ae94-dac8b30dc2fc","Type":"ContainerStarted","Data":"6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a"}
Dec 10 00:57:44 crc kubenswrapper[4884]: I1210 00:57:44.151947 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"438ed569-d56e-4c42-ae94-dac8b30dc2fc","Type":"ContainerStarted","Data":"4143cc5724ba885d8d126a002bd75934530d7f846e0500d2a9dbfc0e6fad20af"}
Dec 10 00:57:44 crc kubenswrapper[4884]: I1210 00:57:44.170694 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.17067489 podStartE2EDuration="2.17067489s" podCreationTimestamp="2025-12-10 00:57:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:57:44.169281602 +0000 UTC m=+1637.247238749" watchObservedRunningTime="2025-12-10 00:57:44.17067489 +0000 UTC m=+1637.248632017"
Dec 10 00:57:44 crc kubenswrapper[4884]: I1210 00:57:44.292246 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Dec 10 00:57:45 crc kubenswrapper[4884]: I1210 00:57:45.186678 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5664a663-e831-4d21-a144-02dfe91bf470","Type":"ContainerStarted","Data":"4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8"}
Dec 10 00:57:45 crc kubenswrapper[4884]: I1210 00:57:45.188195 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5664a663-e831-4d21-a144-02dfe91bf470","Type":"ContainerStarted","Data":"a599d23b50de8d33b26aa127c04d335835c2ec8bb2abb655459ca8c4a3bb5ecd"}
Dec 10 00:57:45 crc kubenswrapper[4884]: I1210 00:57:45.220515 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.22049243 podStartE2EDuration="2.22049243s" podCreationTimestamp="2025-12-10 00:57:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:57:45.210318424 +0000 UTC m=+1638.288275591" watchObservedRunningTime="2025-12-10 00:57:45.22049243 +0000 UTC m=+1638.298449557"
Dec 10 00:57:45 crc kubenswrapper[4884]: I1210 00:57:45.302222 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="966092e4-bb27-4e13-97e1-46f55c562a7f" path="/var/lib/kubelet/pods/966092e4-bb27-4e13-97e1-46f55c562a7f/volumes"
Dec 10 00:57:46 crc kubenswrapper[4884]: I1210 00:57:46.010561 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 10 00:57:46 crc kubenswrapper[4884]: I1210 00:57:46.010633 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 10 00:57:46 crc kubenswrapper[4884]: I1210 00:57:46.114030 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Dec 10 00:57:46 crc kubenswrapper[4884]: I1210 00:57:46.200418 4884 generic.go:334] "Generic (PLEG): container finished" podID="595f30cf-13ba-48bb-949d-393f11660091" containerID="1d164ad48e4dd6427611499a874a60b310b0ad64b7401cf5175ec98429159a81" exitCode=0
Dec 10 00:57:46 crc kubenswrapper[4884]: I1210 00:57:46.200584 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-mrh8v" event={"ID":"595f30cf-13ba-48bb-949d-393f11660091","Type":"ContainerDied","Data":"1d164ad48e4dd6427611499a874a60b310b0ad64b7401cf5175ec98429159a81"}
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.214060 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"]
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.230070 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.232969 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.233197 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.235062 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-pd287"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.242753 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.366877 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-scripts\") pod \"aodh-0\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.367444 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-combined-ca-bundle\") pod \"aodh-0\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.367538 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkblk\" (UniqueName: \"kubernetes.io/projected/42adb60e-d792-453f-a207-41beb4c9fa48-kube-api-access-hkblk\") pod \"aodh-0\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.367567 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-config-data\") pod \"aodh-0\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.469912 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-combined-ca-bundle\") pod \"aodh-0\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.469990 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkblk\" (UniqueName: \"kubernetes.io/projected/42adb60e-d792-453f-a207-41beb4c9fa48-kube-api-access-hkblk\") pod \"aodh-0\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.470019 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-config-data\") pod \"aodh-0\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.470165 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-scripts\") pod \"aodh-0\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.480454 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-scripts\") pod \"aodh-0\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.482591 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-config-data\") pod \"aodh-0\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.483993 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-combined-ca-bundle\") pod \"aodh-0\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.500546 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkblk\" (UniqueName: \"kubernetes.io/projected/42adb60e-d792-453f-a207-41beb4c9fa48-kube-api-access-hkblk\") pod \"aodh-0\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.582661 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.717752 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-mrh8v"
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.881295 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wskbn\" (UniqueName: \"kubernetes.io/projected/595f30cf-13ba-48bb-949d-393f11660091-kube-api-access-wskbn\") pod \"595f30cf-13ba-48bb-949d-393f11660091\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") "
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.881351 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-scripts\") pod \"595f30cf-13ba-48bb-949d-393f11660091\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") "
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.881411 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-combined-ca-bundle\") pod \"595f30cf-13ba-48bb-949d-393f11660091\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") "
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.882143 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-config-data\") pod \"595f30cf-13ba-48bb-949d-393f11660091\" (UID: \"595f30cf-13ba-48bb-949d-393f11660091\") "
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.886191 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/595f30cf-13ba-48bb-949d-393f11660091-kube-api-access-wskbn" (OuterVolumeSpecName: "kube-api-access-wskbn") pod "595f30cf-13ba-48bb-949d-393f11660091" (UID: "595f30cf-13ba-48bb-949d-393f11660091"). InnerVolumeSpecName "kube-api-access-wskbn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.895630 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-scripts" (OuterVolumeSpecName: "scripts") pod "595f30cf-13ba-48bb-949d-393f11660091" (UID: "595f30cf-13ba-48bb-949d-393f11660091"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.927703 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-config-data" (OuterVolumeSpecName: "config-data") pod "595f30cf-13ba-48bb-949d-393f11660091" (UID: "595f30cf-13ba-48bb-949d-393f11660091"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.952524 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "595f30cf-13ba-48bb-949d-393f11660091" (UID: "595f30cf-13ba-48bb-949d-393f11660091"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.985228 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wskbn\" (UniqueName: \"kubernetes.io/projected/595f30cf-13ba-48bb-949d-393f11660091-kube-api-access-wskbn\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.985524 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-scripts\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.985542 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:47 crc kubenswrapper[4884]: I1210 00:57:47.985553 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/595f30cf-13ba-48bb-949d-393f11660091-config-data\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.067302 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.236608 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-mrh8v"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.236693 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-mrh8v" event={"ID":"595f30cf-13ba-48bb-949d-393f11660091","Type":"ContainerDied","Data":"b845831f13bfe9cdd4ae6d250fec3617f5eb7c78b7bbd5b063bda89a17da563b"}
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.236728 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b845831f13bfe9cdd4ae6d250fec3617f5eb7c78b7bbd5b063bda89a17da563b"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.239869 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"42adb60e-d792-453f-a207-41beb4c9fa48","Type":"ContainerStarted","Data":"01823eb97217f5d2ea4f55e9992ab0e7c52f976916f1e10fe80cd58ab66f35ab"}
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.325271 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Dec 10 00:57:48 crc kubenswrapper[4884]: E1210 00:57:48.325794 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="595f30cf-13ba-48bb-949d-393f11660091" containerName="nova-cell1-conductor-db-sync"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.325815 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="595f30cf-13ba-48bb-949d-393f11660091" containerName="nova-cell1-conductor-db-sync"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.326069 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="595f30cf-13ba-48bb-949d-393f11660091" containerName="nova-cell1-conductor-db-sync"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.326861 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.335099 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.335276 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.493780 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e77e5c1a-be83-4f84-8454-2e4cd443c2dc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e77e5c1a-be83-4f84-8454-2e4cd443c2dc\") " pod="openstack/nova-cell1-conductor-0"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.493852 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rkfhg\" (UniqueName: \"kubernetes.io/projected/e77e5c1a-be83-4f84-8454-2e4cd443c2dc-kube-api-access-rkfhg\") pod \"nova-cell1-conductor-0\" (UID: \"e77e5c1a-be83-4f84-8454-2e4cd443c2dc\") " pod="openstack/nova-cell1-conductor-0"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.493879 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e77e5c1a-be83-4f84-8454-2e4cd443c2dc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e77e5c1a-be83-4f84-8454-2e4cd443c2dc\") " pod="openstack/nova-cell1-conductor-0"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.576847 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.595657 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e77e5c1a-be83-4f84-8454-2e4cd443c2dc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e77e5c1a-be83-4f84-8454-2e4cd443c2dc\") " pod="openstack/nova-cell1-conductor-0"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.595718 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rkfhg\" (UniqueName: \"kubernetes.io/projected/e77e5c1a-be83-4f84-8454-2e4cd443c2dc-kube-api-access-rkfhg\") pod \"nova-cell1-conductor-0\" (UID: \"e77e5c1a-be83-4f84-8454-2e4cd443c2dc\") " pod="openstack/nova-cell1-conductor-0"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.595746 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e77e5c1a-be83-4f84-8454-2e4cd443c2dc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e77e5c1a-be83-4f84-8454-2e4cd443c2dc\") " pod="openstack/nova-cell1-conductor-0"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.599356 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e77e5c1a-be83-4f84-8454-2e4cd443c2dc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e77e5c1a-be83-4f84-8454-2e4cd443c2dc\") " pod="openstack/nova-cell1-conductor-0"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.600284 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e77e5c1a-be83-4f84-8454-2e4cd443c2dc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e77e5c1a-be83-4f84-8454-2e4cd443c2dc\") " pod="openstack/nova-cell1-conductor-0"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.616406 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rkfhg\" (UniqueName: \"kubernetes.io/projected/e77e5c1a-be83-4f84-8454-2e4cd443c2dc-kube-api-access-rkfhg\") pod \"nova-cell1-conductor-0\" (UID: \"e77e5c1a-be83-4f84-8454-2e4cd443c2dc\") " pod="openstack/nova-cell1-conductor-0"
Dec 10 00:57:48 crc kubenswrapper[4884]: I1210 00:57:48.643036 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Dec 10 00:57:49 crc kubenswrapper[4884]: I1210 00:57:49.149653 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Dec 10 00:57:49 crc kubenswrapper[4884]: W1210 00:57:49.157103 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode77e5c1a_be83_4f84_8454_2e4cd443c2dc.slice/crio-b9ef822f75c249c199e0ed6c4286bacb2d0faee99894e0921515e6e12d7251f9 WatchSource:0}: Error finding container b9ef822f75c249c199e0ed6c4286bacb2d0faee99894e0921515e6e12d7251f9: Status 404 returned error can't find the container with id b9ef822f75c249c199e0ed6c4286bacb2d0faee99894e0921515e6e12d7251f9
Dec 10 00:57:49 crc kubenswrapper[4884]: I1210 00:57:49.256617 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"42adb60e-d792-453f-a207-41beb4c9fa48","Type":"ContainerStarted","Data":"345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45"}
Dec 10 00:57:49 crc kubenswrapper[4884]: I1210 00:57:49.258425 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e77e5c1a-be83-4f84-8454-2e4cd443c2dc","Type":"ContainerStarted","Data":"b9ef822f75c249c199e0ed6c4286bacb2d0faee99894e0921515e6e12d7251f9"}
Dec 10 00:57:50 crc kubenswrapper[4884]: I1210 00:57:50.282067 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e77e5c1a-be83-4f84-8454-2e4cd443c2dc","Type":"ContainerStarted","Data":"7b6980564792411a91a2d3a2f01a094ad7ed1732f358863eaa87715b13203f92"}
Dec 10 00:57:50 crc kubenswrapper[4884]: I1210 00:57:50.282686 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Dec 10 00:57:50 crc kubenswrapper[4884]: I1210 00:57:50.307550 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.30753336 podStartE2EDuration="2.30753336s" podCreationTimestamp="2025-12-10 00:57:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:57:50.301923608 +0000 UTC m=+1643.379880735" watchObservedRunningTime="2025-12-10 00:57:50.30753336 +0000 UTC m=+1643.385490477"
Dec 10 00:57:51 crc kubenswrapper[4884]: I1210 00:57:51.004268 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:57:51 crc kubenswrapper[4884]: I1210 00:57:51.004938 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="ceilometer-central-agent" containerID="cri-o://27e1e02783ef18f16d63f6204f9a26347e3568d95e8fb8df5be64d6db7defae0" gracePeriod=30
Dec 10 00:57:51 crc kubenswrapper[4884]: I1210 00:57:51.005006 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="proxy-httpd" containerID="cri-o://20d66aebf317cc913631679235b1b95a8ad324611cf6925544fec246ed67776b" gracePeriod=30
Dec 10 00:57:51 crc kubenswrapper[4884]: I1210 00:57:51.005074 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="ceilometer-notification-agent" containerID="cri-o://6b21b688c4c983b3192109e446730f5ee48645b4c47b3ff0ca5e743491ca5fe0" gracePeriod=30
Dec 10 00:57:51 crc kubenswrapper[4884]: I1210 00:57:51.005057 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="sg-core" containerID="cri-o://e33df5de5d62567a511a10f871fe9f7fa21a223a1d8b946246512431a453352b" gracePeriod=30
Dec 10 00:57:51 crc kubenswrapper[4884]: I1210 00:57:51.287060 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957"
Dec 10 00:57:51 crc kubenswrapper[4884]: E1210 00:57:51.288156 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 00:57:51 crc kubenswrapper[4884]: I1210 00:57:51.295307 4884 generic.go:334] "Generic (PLEG): container finished" podID="b90dc1d7-e444-4c26-9425-748256d855c4" containerID="20d66aebf317cc913631679235b1b95a8ad324611cf6925544fec246ed67776b" exitCode=0
Dec 10 00:57:51 crc kubenswrapper[4884]: I1210 00:57:51.295339 4884 generic.go:334] "Generic (PLEG): container finished" podID="b90dc1d7-e444-4c26-9425-748256d855c4" containerID="e33df5de5d62567a511a10f871fe9f7fa21a223a1d8b946246512431a453352b" exitCode=2
Dec 10 00:57:51 crc kubenswrapper[4884]: I1210 00:57:51.300499 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b90dc1d7-e444-4c26-9425-748256d855c4","Type":"ContainerDied","Data":"20d66aebf317cc913631679235b1b95a8ad324611cf6925544fec246ed67776b"}
Dec 10 00:57:51 crc kubenswrapper[4884]: I1210 00:57:51.300546 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b90dc1d7-e444-4c26-9425-748256d855c4","Type":"ContainerDied","Data":"e33df5de5d62567a511a10f871fe9f7fa21a223a1d8b946246512431a453352b"}
Dec 10 00:57:51 crc kubenswrapper[4884]: I1210 00:57:51.300562 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"42adb60e-d792-453f-a207-41beb4c9fa48","Type":"ContainerStarted","Data":"87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b"}
Dec 10 00:57:52 crc kubenswrapper[4884]: I1210 00:57:52.310119 4884 generic.go:334] "Generic (PLEG): container finished" podID="b90dc1d7-e444-4c26-9425-748256d855c4" containerID="27e1e02783ef18f16d63f6204f9a26347e3568d95e8fb8df5be64d6db7defae0" exitCode=0
Dec 10 00:57:52 crc kubenswrapper[4884]: I1210 00:57:52.310170 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b90dc1d7-e444-4c26-9425-748256d855c4","Type":"ContainerDied","Data":"27e1e02783ef18f16d63f6204f9a26347e3568d95e8fb8df5be64d6db7defae0"}
Dec 10 00:57:52 crc kubenswrapper[4884]: I1210 00:57:52.860879 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 10 00:57:52 crc kubenswrapper[4884]: I1210 00:57:52.861252 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 10 00:57:53 crc kubenswrapper[4884]: I1210 00:57:53.368405 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"42adb60e-d792-453f-a207-41beb4c9fa48","Type":"ContainerStarted","Data":"fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27"}
Dec 10 00:57:53 crc kubenswrapper[4884]: I1210 00:57:53.576892 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Dec 10 00:57:53 crc kubenswrapper[4884]: I1210 00:57:53.624150 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Dec 10 00:57:53 crc kubenswrapper[4884]: I1210 00:57:53.733176 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"]
Dec 10 00:57:53 crc kubenswrapper[4884]: I1210 00:57:53.945198 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.228:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 10 00:57:53 crc kubenswrapper[4884]: I1210 00:57:53.945226 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.228:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 10 00:57:54 crc kubenswrapper[4884]: I1210 00:57:54.383780 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"42adb60e-d792-453f-a207-41beb4c9fa48","Type":"ContainerStarted","Data":"a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064"}
Dec 10 00:57:54 crc kubenswrapper[4884]: I1210 00:57:54.384055 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-listener" containerID="cri-o://a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064" gracePeriod=30
Dec 10 00:57:54 crc kubenswrapper[4884]: I1210 00:57:54.384058 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-notifier" containerID="cri-o://fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27" gracePeriod=30
Dec 10 00:57:54 crc kubenswrapper[4884]: I1210 00:57:54.384073 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-api" containerID="cri-o://345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45" gracePeriod=30
Dec 10 00:57:54 crc kubenswrapper[4884]: I1210 00:57:54.384092 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-evaluator" containerID="cri-o://87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b" gracePeriod=30
Dec 10 00:57:54 crc kubenswrapper[4884]: I1210 00:57:54.419554 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=1.5040808509999999 podStartE2EDuration="7.419530627s" podCreationTimestamp="2025-12-10 00:57:47 +0000 UTC" firstStartedPulling="2025-12-10 00:57:48.073189303 +0000 UTC m=+1641.151146410" lastFinishedPulling="2025-12-10 00:57:53.988639069 +0000 UTC m=+1647.066596186" observedRunningTime="2025-12-10 00:57:54.40709527 +0000 UTC m=+1647.485052407" watchObservedRunningTime="2025-12-10 00:57:54.419530627 +0000 UTC m=+1647.497487754"
Dec 10 00:57:54 crc kubenswrapper[4884]: I1210 00:57:54.428978 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Dec 10 00:57:55 crc kubenswrapper[4884]: I1210 00:57:55.396090 4884 generic.go:334] "Generic (PLEG): container finished" podID="42adb60e-d792-453f-a207-41beb4c9fa48" containerID="fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27" exitCode=0
Dec 10 00:57:55 crc kubenswrapper[4884]: I1210 00:57:55.396131 4884 generic.go:334] "Generic (PLEG): container finished" podID="42adb60e-d792-453f-a207-41beb4c9fa48" containerID="87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b" exitCode=0
Dec 10 00:57:55 crc kubenswrapper[4884]: I1210 00:57:55.396141 4884 generic.go:334] "Generic (PLEG): container finished" podID="42adb60e-d792-453f-a207-41beb4c9fa48" containerID="345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45" exitCode=0
Dec 10 00:57:55 crc kubenswrapper[4884]: I1210 00:57:55.396168 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"42adb60e-d792-453f-a207-41beb4c9fa48","Type":"ContainerDied","Data":"fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27"}
Dec 10 00:57:55 crc kubenswrapper[4884]: I1210 00:57:55.396210 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"42adb60e-d792-453f-a207-41beb4c9fa48","Type":"ContainerDied","Data":"87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b"}
Dec 10 00:57:55 crc kubenswrapper[4884]: I1210 00:57:55.396220 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"42adb60e-d792-453f-a207-41beb4c9fa48","Type":"ContainerDied","Data":"345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45"}
Dec 10 00:57:55 crc kubenswrapper[4884]: I1210 00:57:55.402289 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 10 00:57:55 crc kubenswrapper[4884]: I1210 00:57:55.402569 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="ad50acf2-d8ef-4a8c-a594-dedfc823cf9f" containerName="kube-state-metrics" containerID="cri-o://c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114" gracePeriod=30
Dec 10 00:57:55 crc kubenswrapper[4884]: I1210 00:57:55.532075 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"]
Dec 10 00:57:55 crc kubenswrapper[4884]: I1210 00:57:55.532295 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mysqld-exporter-0" podUID="0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4" containerName="mysqld-exporter" containerID="cri-o://10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca" gracePeriod=30
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.147337 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.255566 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdghx\" (UniqueName: \"kubernetes.io/projected/ad50acf2-d8ef-4a8c-a594-dedfc823cf9f-kube-api-access-bdghx\") pod \"ad50acf2-d8ef-4a8c-a594-dedfc823cf9f\" (UID: \"ad50acf2-d8ef-4a8c-a594-dedfc823cf9f\") "
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.262221 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad50acf2-d8ef-4a8c-a594-dedfc823cf9f-kube-api-access-bdghx" (OuterVolumeSpecName: "kube-api-access-bdghx") pod "ad50acf2-d8ef-4a8c-a594-dedfc823cf9f" (UID: "ad50acf2-d8ef-4a8c-a594-dedfc823cf9f"). InnerVolumeSpecName "kube-api-access-bdghx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.362259 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdghx\" (UniqueName: \"kubernetes.io/projected/ad50acf2-d8ef-4a8c-a594-dedfc823cf9f-kube-api-access-bdghx\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.370065 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.418382 4884 generic.go:334] "Generic (PLEG): container finished" podID="ad50acf2-d8ef-4a8c-a594-dedfc823cf9f" containerID="c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114" exitCode=2
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.418465 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ad50acf2-d8ef-4a8c-a594-dedfc823cf9f","Type":"ContainerDied","Data":"c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114"}
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.418494 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ad50acf2-d8ef-4a8c-a594-dedfc823cf9f","Type":"ContainerDied","Data":"a21332546eb6181f8efe619ac3fd0fdb741b32fb0d502930a316c78aa5b667ff"}
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.418510 4884 scope.go:117] "RemoveContainer" containerID="c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.418628 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.423745 4884 generic.go:334] "Generic (PLEG): container finished" podID="0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4" containerID="10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca" exitCode=2
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.423815 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4","Type":"ContainerDied","Data":"10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca"}
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.423844 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4","Type":"ContainerDied","Data":"9331ae8a79ec6b3dc1c183e568778d9b3ee0603a8f68db5d1a1e12bde91c198a"}
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.423852 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mysqld-exporter-0"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.425911 4884 generic.go:334] "Generic (PLEG): container finished" podID="b90dc1d7-e444-4c26-9425-748256d855c4" containerID="6b21b688c4c983b3192109e446730f5ee48645b4c47b3ff0ca5e743491ca5fe0" exitCode=0
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.425938 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b90dc1d7-e444-4c26-9425-748256d855c4","Type":"ContainerDied","Data":"6b21b688c4c983b3192109e446730f5ee48645b4c47b3ff0ca5e743491ca5fe0"}
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.477494 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-config-data\") pod \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\" (UID: \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\") "
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.477822 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9npz7\" (UniqueName: \"kubernetes.io/projected/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-kube-api-access-9npz7\") pod \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\" (UID: \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\") "
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.477925 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-combined-ca-bundle\") pod \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\" (UID: \"0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4\") "
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.491174 4884 scope.go:117] "RemoveContainer" containerID="c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114"
Dec 10 00:57:56 crc kubenswrapper[4884]: E1210 00:57:56.494115 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114\": container with ID starting with c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114 not found: ID does not exist" containerID="c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.494170 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114"} err="failed to get container status \"c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114\": rpc error: code = NotFound desc = could not find container \"c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114\": container with ID starting with c7534f3981c3499327a78c70976e8dfa2f5099bc6bfe8e313a08aa202e465114 not found: ID does not exist"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.494200 4884 scope.go:117] "RemoveContainer" containerID="10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.526661 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.528595 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-kube-api-access-9npz7" (OuterVolumeSpecName: "kube-api-access-9npz7") pod "0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4" (UID: "0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4"). InnerVolumeSpecName "kube-api-access-9npz7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.552801 4884 scope.go:117] "RemoveContainer" containerID="10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.554031 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4" (UID: "0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:57:56 crc kubenswrapper[4884]: E1210 00:57:56.561734 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca\": container with ID starting with 10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca not found: ID does not exist" containerID="10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.561784 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca"} err="failed to get container status \"10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca\": rpc error: code = NotFound desc = could not find container \"10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca\": container with ID starting with 10b8df64d3a6d0c45b680e27a584ec1994521f68ca2e97b87d3f779e3c8714ca not found: ID does not exist"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.569340 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.580969 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9npz7\" (UniqueName: \"kubernetes.io/projected/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-kube-api-access-9npz7\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.581339 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.584345 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-config-data" (OuterVolumeSpecName: "config-data") pod "0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4" (UID: "0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.588286 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 10 00:57:56 crc kubenswrapper[4884]: E1210 00:57:56.588853 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad50acf2-d8ef-4a8c-a594-dedfc823cf9f" containerName="kube-state-metrics"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.588873 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad50acf2-d8ef-4a8c-a594-dedfc823cf9f" containerName="kube-state-metrics"
Dec 10 00:57:56 crc kubenswrapper[4884]: E1210 00:57:56.588901 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4" containerName="mysqld-exporter"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.588908 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4" containerName="mysqld-exporter"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.589104 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad50acf2-d8ef-4a8c-a594-dedfc823cf9f" containerName="kube-state-metrics"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.589136 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4" containerName="mysqld-exporter"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.589943 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.592463 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.595457 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.597880 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.653520 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.684849 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/a68f803e-f637-4336-af9c-04110737be37-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"a68f803e-f637-4336-af9c-04110737be37\") " pod="openstack/kube-state-metrics-0"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.684935 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/a68f803e-f637-4336-af9c-04110737be37-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"a68f803e-f637-4336-af9c-04110737be37\") " pod="openstack/kube-state-metrics-0"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.684967 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4sgk\" (UniqueName: \"kubernetes.io/projected/a68f803e-f637-4336-af9c-04110737be37-kube-api-access-n4sgk\") pod \"kube-state-metrics-0\" (UID: \"a68f803e-f637-4336-af9c-04110737be37\") " pod="openstack/kube-state-metrics-0"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.685155 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68f803e-f637-4336-af9c-04110737be37-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"a68f803e-f637-4336-af9c-04110737be37\") " pod="openstack/kube-state-metrics-0"
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.685238 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4-config-data\") on node \"crc\" DevicePath \"\""
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.758165 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-0"]
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.787180 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-scripts\") pod \"b90dc1d7-e444-4c26-9425-748256d855c4\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") "
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.787252 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfxz9\" (UniqueName: \"kubernetes.io/projected/b90dc1d7-e444-4c26-9425-748256d855c4-kube-api-access-kfxz9\") pod \"b90dc1d7-e444-4c26-9425-748256d855c4\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") "
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.787734 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-combined-ca-bundle\") pod \"b90dc1d7-e444-4c26-9425-748256d855c4\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") "
Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.787781 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b90dc1d7-e444-4c26-9425-748256d855c4-log-httpd\") pod \"b90dc1d7-e444-4c26-9425-748256d855c4\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") "
Dec 10 00:57:56 crc kubenswrapper[4884]:
I1210 00:57:56.787820 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b90dc1d7-e444-4c26-9425-748256d855c4-run-httpd\") pod \"b90dc1d7-e444-4c26-9425-748256d855c4\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.788639 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b90dc1d7-e444-4c26-9425-748256d855c4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b90dc1d7-e444-4c26-9425-748256d855c4" (UID: "b90dc1d7-e444-4c26-9425-748256d855c4"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.788702 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-config-data\") pod \"b90dc1d7-e444-4c26-9425-748256d855c4\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.788756 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-sg-core-conf-yaml\") pod \"b90dc1d7-e444-4c26-9425-748256d855c4\" (UID: \"b90dc1d7-e444-4c26-9425-748256d855c4\") " Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.788958 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b90dc1d7-e444-4c26-9425-748256d855c4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b90dc1d7-e444-4c26-9425-748256d855c4" (UID: "b90dc1d7-e444-4c26-9425-748256d855c4"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.789661 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68f803e-f637-4336-af9c-04110737be37-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"a68f803e-f637-4336-af9c-04110737be37\") " pod="openstack/kube-state-metrics-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.790850 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.790945 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-scripts" (OuterVolumeSpecName: "scripts") pod "b90dc1d7-e444-4c26-9425-748256d855c4" (UID: "b90dc1d7-e444-4c26-9425-748256d855c4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.793281 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b90dc1d7-e444-4c26-9425-748256d855c4-kube-api-access-kfxz9" (OuterVolumeSpecName: "kube-api-access-kfxz9") pod "b90dc1d7-e444-4c26-9425-748256d855c4" (UID: "b90dc1d7-e444-4c26-9425-748256d855c4"). InnerVolumeSpecName "kube-api-access-kfxz9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.792412 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/a68f803e-f637-4336-af9c-04110737be37-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"a68f803e-f637-4336-af9c-04110737be37\") " pod="openstack/kube-state-metrics-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.794276 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/a68f803e-f637-4336-af9c-04110737be37-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"a68f803e-f637-4336-af9c-04110737be37\") " pod="openstack/kube-state-metrics-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.795110 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4sgk\" (UniqueName: \"kubernetes.io/projected/a68f803e-f637-4336-af9c-04110737be37-kube-api-access-n4sgk\") pod \"kube-state-metrics-0\" (UID: \"a68f803e-f637-4336-af9c-04110737be37\") " pod="openstack/kube-state-metrics-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.795443 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.795513 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfxz9\" (UniqueName: \"kubernetes.io/projected/b90dc1d7-e444-4c26-9425-748256d855c4-kube-api-access-kfxz9\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.795578 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b90dc1d7-e444-4c26-9425-748256d855c4-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.795636 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b90dc1d7-e444-4c26-9425-748256d855c4-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.795469 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a68f803e-f637-4336-af9c-04110737be37-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"a68f803e-f637-4336-af9c-04110737be37\") " pod="openstack/kube-state-metrics-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.796328 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/a68f803e-f637-4336-af9c-04110737be37-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"a68f803e-f637-4336-af9c-04110737be37\") " pod="openstack/kube-state-metrics-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.802282 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/a68f803e-f637-4336-af9c-04110737be37-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"a68f803e-f637-4336-af9c-04110737be37\") " pod="openstack/kube-state-metrics-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.806492 4884 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/mysqld-exporter-0"] Dec 10 00:57:56 crc kubenswrapper[4884]: E1210 00:57:56.807071 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="ceilometer-notification-agent" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.807090 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="ceilometer-notification-agent" Dec 10 00:57:56 crc kubenswrapper[4884]: E1210 00:57:56.807106 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="ceilometer-central-agent" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.807113 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="ceilometer-central-agent" Dec 10 00:57:56 crc kubenswrapper[4884]: E1210 00:57:56.807132 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="proxy-httpd" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.807140 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="proxy-httpd" Dec 10 00:57:56 crc kubenswrapper[4884]: E1210 00:57:56.807153 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="sg-core" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.807159 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="sg-core" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.807342 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="ceilometer-notification-agent" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.807352 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="sg-core" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.807371 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="ceilometer-central-agent" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.807383 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" containerName="proxy-httpd" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.810583 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.812209 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"mysqld-exporter-config-data" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.812404 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-mysqld-exporter-svc" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.816383 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4sgk\" (UniqueName: \"kubernetes.io/projected/a68f803e-f637-4336-af9c-04110737be37-kube-api-access-n4sgk\") pod \"kube-state-metrics-0\" (UID: \"a68f803e-f637-4336-af9c-04110737be37\") " pod="openstack/kube-state-metrics-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.824072 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.838405 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b90dc1d7-e444-4c26-9425-748256d855c4" (UID: "b90dc1d7-e444-4c26-9425-748256d855c4"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:56 crc kubenswrapper[4884]: E1210 00:57:56.863666 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0ff8e152_3ae1_4ed0_a3ea_03fcbc4fc1b4.slice\": RecentStats: unable to find data in memory cache]" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.893652 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b90dc1d7-e444-4c26-9425-748256d855c4" (UID: "b90dc1d7-e444-4c26-9425-748256d855c4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.897871 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bz66\" (UniqueName: \"kubernetes.io/projected/c230f916-e118-40c7-9696-ee437fc34468-kube-api-access-8bz66\") pod \"mysqld-exporter-0\" (UID: \"c230f916-e118-40c7-9696-ee437fc34468\") " pod="openstack/mysqld-exporter-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.897929 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c230f916-e118-40c7-9696-ee437fc34468-config-data\") pod \"mysqld-exporter-0\" (UID: \"c230f916-e118-40c7-9696-ee437fc34468\") " pod="openstack/mysqld-exporter-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.897997 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/c230f916-e118-40c7-9696-ee437fc34468-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"c230f916-e118-40c7-9696-ee437fc34468\") " pod="openstack/mysqld-exporter-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.898135 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c230f916-e118-40c7-9696-ee437fc34468-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"c230f916-e118-40c7-9696-ee437fc34468\") " pod="openstack/mysqld-exporter-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.898543 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.898573 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.919854 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-config-data" (OuterVolumeSpecName: "config-data") pod "b90dc1d7-e444-4c26-9425-748256d855c4" (UID: "b90dc1d7-e444-4c26-9425-748256d855c4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.963350 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 10 00:57:56 crc kubenswrapper[4884]: I1210 00:57:56.999857 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bz66\" (UniqueName: \"kubernetes.io/projected/c230f916-e118-40c7-9696-ee437fc34468-kube-api-access-8bz66\") pod \"mysqld-exporter-0\" (UID: \"c230f916-e118-40c7-9696-ee437fc34468\") " pod="openstack/mysqld-exporter-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.000221 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c230f916-e118-40c7-9696-ee437fc34468-config-data\") pod \"mysqld-exporter-0\" (UID: \"c230f916-e118-40c7-9696-ee437fc34468\") " pod="openstack/mysqld-exporter-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.000964 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/c230f916-e118-40c7-9696-ee437fc34468-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"c230f916-e118-40c7-9696-ee437fc34468\") " pod="openstack/mysqld-exporter-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.001377 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c230f916-e118-40c7-9696-ee437fc34468-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"c230f916-e118-40c7-9696-ee437fc34468\") " pod="openstack/mysqld-exporter-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.001502 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b90dc1d7-e444-4c26-9425-748256d855c4-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.004392 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c230f916-e118-40c7-9696-ee437fc34468-config-data\") pod \"mysqld-exporter-0\" (UID: \"c230f916-e118-40c7-9696-ee437fc34468\") " pod="openstack/mysqld-exporter-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.004783 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mysqld-exporter-tls-certs\" (UniqueName: \"kubernetes.io/secret/c230f916-e118-40c7-9696-ee437fc34468-mysqld-exporter-tls-certs\") pod \"mysqld-exporter-0\" (UID: \"c230f916-e118-40c7-9696-ee437fc34468\") " pod="openstack/mysqld-exporter-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.011033 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c230f916-e118-40c7-9696-ee437fc34468-combined-ca-bundle\") pod \"mysqld-exporter-0\" (UID: \"c230f916-e118-40c7-9696-ee437fc34468\") " pod="openstack/mysqld-exporter-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.031695 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bz66\" (UniqueName: \"kubernetes.io/projected/c230f916-e118-40c7-9696-ee437fc34468-kube-api-access-8bz66\") pod \"mysqld-exporter-0\" (UID: \"c230f916-e118-40c7-9696-ee437fc34468\") " pod="openstack/mysqld-exporter-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.131929 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mysqld-exporter-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.305663 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4" path="/var/lib/kubelet/pods/0ff8e152-3ae1-4ed0-a3ea-03fcbc4fc1b4/volumes" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.306817 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad50acf2-d8ef-4a8c-a594-dedfc823cf9f" path="/var/lib/kubelet/pods/ad50acf2-d8ef-4a8c-a594-dedfc823cf9f/volumes" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.457100 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b90dc1d7-e444-4c26-9425-748256d855c4","Type":"ContainerDied","Data":"0f89e1a38a70c6122438ee6181ebb8ebc300f1d967c695a70468375a1ab3fc6d"} Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.457150 4884 scope.go:117] "RemoveContainer" containerID="20d66aebf317cc913631679235b1b95a8ad324611cf6925544fec246ed67776b" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.457153 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.507340 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.517247 4884 scope.go:117] "RemoveContainer" containerID="e33df5de5d62567a511a10f871fe9f7fa21a223a1d8b946246512431a453352b" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.519835 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.534762 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.537914 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.541365 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.541542 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.555491 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.574252 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.579068 4884 scope.go:117] "RemoveContainer" containerID="6b21b688c4c983b3192109e446730f5ee48645b4c47b3ff0ca5e743491ca5fe0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.607929 4884 scope.go:117] "RemoveContainer" containerID="27e1e02783ef18f16d63f6204f9a26347e3568d95e8fb8df5be64d6db7defae0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.617977 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/543a2843-595e-4fa2-91b9-75fcdbddf2c6-run-httpd\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.618029 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/543a2843-595e-4fa2-91b9-75fcdbddf2c6-log-httpd\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.618122 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-scripts\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.618159 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-config-data\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.618235 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.618259 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ftps\" (UniqueName: \"kubernetes.io/projected/543a2843-595e-4fa2-91b9-75fcdbddf2c6-kube-api-access-5ftps\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.618304 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-combined-ca-bundle\") 
pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.645349 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mysqld-exporter-0"] Dec 10 00:57:57 crc kubenswrapper[4884]: W1210 00:57:57.646745 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc230f916_e118_40c7_9696_ee437fc34468.slice/crio-e06193acddf5db3976113a68ce1dfbc0ecdbae973778775f21f47d7788598bfc WatchSource:0}: Error finding container e06193acddf5db3976113a68ce1dfbc0ecdbae973778775f21f47d7788598bfc: Status 404 returned error can't find the container with id e06193acddf5db3976113a68ce1dfbc0ecdbae973778775f21f47d7788598bfc Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.707962 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:57 crc kubenswrapper[4884]: E1210 00:57:57.709119 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data kube-api-access-5ftps log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/ceilometer-0" podUID="543a2843-595e-4fa2-91b9-75fcdbddf2c6" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.719760 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-scripts\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.719842 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-config-data\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.719928 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.719948 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ftps\" (UniqueName: \"kubernetes.io/projected/543a2843-595e-4fa2-91b9-75fcdbddf2c6-kube-api-access-5ftps\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.719992 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.720030 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/543a2843-595e-4fa2-91b9-75fcdbddf2c6-run-httpd\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.720045 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/543a2843-595e-4fa2-91b9-75fcdbddf2c6-log-httpd\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.720548 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/543a2843-595e-4fa2-91b9-75fcdbddf2c6-log-httpd\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.724816 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/543a2843-595e-4fa2-91b9-75fcdbddf2c6-run-httpd\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.726408 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.727091 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-config-data\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.727671 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.728564 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-scripts\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:57 crc kubenswrapper[4884]: I1210 00:57:57.739822 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ftps\" (UniqueName: \"kubernetes.io/projected/543a2843-595e-4fa2-91b9-75fcdbddf2c6-kube-api-access-5ftps\") pod \"ceilometer-0\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " pod="openstack/ceilometer-0" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.472190 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"c230f916-e118-40c7-9696-ee437fc34468","Type":"ContainerStarted","Data":"e06193acddf5db3976113a68ce1dfbc0ecdbae973778775f21f47d7788598bfc"} Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.474058 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.474554 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a68f803e-f637-4336-af9c-04110737be37","Type":"ContainerStarted","Data":"2375a92de3fe5105339ab98352115c2edaedba9ea525ebad82a80003bdc27b01"} Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.474642 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a68f803e-f637-4336-af9c-04110737be37","Type":"ContainerStarted","Data":"b5f0d107a5e64206a642498d2b92b6085a3beb5e0893180bb802f690cf93cd4f"} Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.474789 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.517142 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.172946512 podStartE2EDuration="2.517109312s" podCreationTimestamp="2025-12-10 00:57:56 +0000 UTC" firstStartedPulling="2025-12-10 00:57:57.561536375 +0000 UTC m=+1650.639493482" lastFinishedPulling="2025-12-10 00:57:57.905699175 +0000 UTC m=+1650.983656282" observedRunningTime="2025-12-10 00:57:58.50556902 +0000 UTC m=+1651.583526177" watchObservedRunningTime="2025-12-10 00:57:58.517109312 +0000 UTC m=+1651.595066459" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.558930 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.638792 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-config-data\") pod \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.638936 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ftps\" (UniqueName: \"kubernetes.io/projected/543a2843-595e-4fa2-91b9-75fcdbddf2c6-kube-api-access-5ftps\") pod \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.639109 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/543a2843-595e-4fa2-91b9-75fcdbddf2c6-log-httpd\") pod \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.639200 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-combined-ca-bundle\") pod \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.639299 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/543a2843-595e-4fa2-91b9-75fcdbddf2c6-run-httpd\") pod \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.639339 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" 
(UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-sg-core-conf-yaml\") pod \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.639468 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-scripts\") pod \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\" (UID: \"543a2843-595e-4fa2-91b9-75fcdbddf2c6\") " Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.639862 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/543a2843-595e-4fa2-91b9-75fcdbddf2c6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "543a2843-595e-4fa2-91b9-75fcdbddf2c6" (UID: "543a2843-595e-4fa2-91b9-75fcdbddf2c6"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.639900 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/543a2843-595e-4fa2-91b9-75fcdbddf2c6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "543a2843-595e-4fa2-91b9-75fcdbddf2c6" (UID: "543a2843-595e-4fa2-91b9-75fcdbddf2c6"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.640739 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/543a2843-595e-4fa2-91b9-75fcdbddf2c6-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.640765 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/543a2843-595e-4fa2-91b9-75fcdbddf2c6-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.645130 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-config-data" (OuterVolumeSpecName: "config-data") pod "543a2843-595e-4fa2-91b9-75fcdbddf2c6" (UID: "543a2843-595e-4fa2-91b9-75fcdbddf2c6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.645596 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/543a2843-595e-4fa2-91b9-75fcdbddf2c6-kube-api-access-5ftps" (OuterVolumeSpecName: "kube-api-access-5ftps") pod "543a2843-595e-4fa2-91b9-75fcdbddf2c6" (UID: "543a2843-595e-4fa2-91b9-75fcdbddf2c6"). InnerVolumeSpecName "kube-api-access-5ftps". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.648057 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-scripts" (OuterVolumeSpecName: "scripts") pod "543a2843-595e-4fa2-91b9-75fcdbddf2c6" (UID: "543a2843-595e-4fa2-91b9-75fcdbddf2c6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.648304 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "543a2843-595e-4fa2-91b9-75fcdbddf2c6" (UID: "543a2843-595e-4fa2-91b9-75fcdbddf2c6"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.649695 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "543a2843-595e-4fa2-91b9-75fcdbddf2c6" (UID: "543a2843-595e-4fa2-91b9-75fcdbddf2c6"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.701455 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.743205 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.743596 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.743617 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ftps\" (UniqueName: \"kubernetes.io/projected/543a2843-595e-4fa2-91b9-75fcdbddf2c6-kube-api-access-5ftps\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.743638 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:58 crc kubenswrapper[4884]: I1210 00:57:58.743655 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/543a2843-595e-4fa2-91b9-75fcdbddf2c6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.308557 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b90dc1d7-e444-4c26-9425-748256d855c4" path="/var/lib/kubelet/pods/b90dc1d7-e444-4c26-9425-748256d855c4/volumes" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.489976 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mysqld-exporter-0" event={"ID":"c230f916-e118-40c7-9696-ee437fc34468","Type":"ContainerStarted","Data":"f21eec16be8ccf06386980be3caa277fce0336b23f70a9f1b58a29f24557d097"} Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.490028 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.519559 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mysqld-exporter-0" podStartSLOduration=2.804413132 podStartE2EDuration="3.519544729s" podCreationTimestamp="2025-12-10 00:57:56 +0000 UTC" firstStartedPulling="2025-12-10 00:57:57.6492627 +0000 UTC m=+1650.727219817" lastFinishedPulling="2025-12-10 00:57:58.364394267 +0000 UTC m=+1651.442351414" observedRunningTime="2025-12-10 00:57:59.516050275 +0000 UTC m=+1652.594007402" watchObservedRunningTime="2025-12-10 00:57:59.519544729 +0000 UTC m=+1652.597501846" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.599955 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.611421 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.621415 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.624314 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.629389 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.629556 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.629668 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.639921 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.766671 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qk8n2\" (UniqueName: \"kubernetes.io/projected/81a53e9a-3d38-45b5-9dc3-23d9140c945d-kube-api-access-qk8n2\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.766729 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.766775 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/81a53e9a-3d38-45b5-9dc3-23d9140c945d-run-httpd\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.766800 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-scripts\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.766826 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.767175 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/81a53e9a-3d38-45b5-9dc3-23d9140c945d-log-httpd\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.767311 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.767596 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-config-data\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.869648 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/81a53e9a-3d38-45b5-9dc3-23d9140c945d-log-httpd\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.869690 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.869755 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-config-data\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.869861 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qk8n2\" (UniqueName: \"kubernetes.io/projected/81a53e9a-3d38-45b5-9dc3-23d9140c945d-kube-api-access-qk8n2\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.869890 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.869909 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/81a53e9a-3d38-45b5-9dc3-23d9140c945d-run-httpd\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0" Dec 10 
00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.869952 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-scripts\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0"
Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.869971 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0"
Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.870339 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/81a53e9a-3d38-45b5-9dc3-23d9140c945d-run-httpd\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0"
Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.871868 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/81a53e9a-3d38-45b5-9dc3-23d9140c945d-log-httpd\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0"
Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.875344 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0"
Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.875526 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0"
Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.876628 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0"
Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.879021 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-scripts\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0"
Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.884509 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qk8n2\" (UniqueName: \"kubernetes.io/projected/81a53e9a-3d38-45b5-9dc3-23d9140c945d-kube-api-access-qk8n2\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0"
Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.893900 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-config-data\") pod \"ceilometer-0\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") " pod="openstack/ceilometer-0"
Dec 10 00:57:59 crc kubenswrapper[4884]: I1210 00:57:59.949528 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 10 00:58:00 crc kubenswrapper[4884]: I1210 00:58:00.407164 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:58:00 crc kubenswrapper[4884]: I1210 00:58:00.503851 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"81a53e9a-3d38-45b5-9dc3-23d9140c945d","Type":"ContainerStarted","Data":"19e8701a0f74121dbd7786ebd005e72768684181e177aae525792a7cf91388c6"}
Dec 10 00:58:01 crc kubenswrapper[4884]: I1210 00:58:01.306019 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="543a2843-595e-4fa2-91b9-75fcdbddf2c6" path="/var/lib/kubelet/pods/543a2843-595e-4fa2-91b9-75fcdbddf2c6/volumes"
Dec 10 00:58:01 crc kubenswrapper[4884]: I1210 00:58:01.528917 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"81a53e9a-3d38-45b5-9dc3-23d9140c945d","Type":"ContainerStarted","Data":"bf3d946e7b27bf0e91dd48759ef9c0b5c095788269dc101d7988ca49cb2616d6"}
Dec 10 00:58:02 crc kubenswrapper[4884]: I1210 00:58:02.286944 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957"
Dec 10 00:58:02 crc kubenswrapper[4884]: E1210 00:58:02.287386 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 00:58:02 crc kubenswrapper[4884]: I1210 00:58:02.543801 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"81a53e9a-3d38-45b5-9dc3-23d9140c945d","Type":"ContainerStarted","Data":"5f5c90e6ba2d318a8cf7640e468cd7986625cc284689e3a315dde2a7f720a56e"}
Dec 10 00:58:02 crc kubenswrapper[4884]: I1210 00:58:02.864378 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Dec 10 00:58:02 crc kubenswrapper[4884]: I1210 00:58:02.864529 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Dec 10 00:58:02 crc kubenswrapper[4884]: I1210 00:58:02.865215 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Dec 10 00:58:02 crc kubenswrapper[4884]: I1210 00:58:02.865292 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Dec 10 00:58:02 crc kubenswrapper[4884]: I1210 00:58:02.868661 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Dec 10 00:58:02 crc kubenswrapper[4884]: I1210 00:58:02.870565 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.130614 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-b4xhk"]
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.152960 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-b4xhk"]
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.153486 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.260466 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-ovsdbserver-sb\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.260604 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-dns-svc\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.260637 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-dns-swift-storage-0\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.260656 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-ovsdbserver-nb\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.260704 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-config\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.260761 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fb52z\" (UniqueName: \"kubernetes.io/projected/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-kube-api-access-fb52z\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.362838 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-dns-svc\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.363015 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-ovsdbserver-nb\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.363088 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-dns-swift-storage-0\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.363180 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-config\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.363273 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fb52z\" (UniqueName: \"kubernetes.io/projected/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-kube-api-access-fb52z\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.363410 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-ovsdbserver-sb\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.363914 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-dns-svc\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.363948 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-ovsdbserver-nb\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.364579 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-dns-swift-storage-0\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.364871 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-ovsdbserver-sb\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.365113 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-config\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.388917 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fb52z\" (UniqueName: \"kubernetes.io/projected/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-kube-api-access-fb52z\") pod \"dnsmasq-dns-f84f9ccf-b4xhk\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.489712 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:03 crc kubenswrapper[4884]: I1210 00:58:03.564958 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"81a53e9a-3d38-45b5-9dc3-23d9140c945d","Type":"ContainerStarted","Data":"771f7ce02d543cafdf210b4cf4159e8672851d9eac734db93634765e766605cf"}
Dec 10 00:58:04 crc kubenswrapper[4884]: I1210 00:58:04.009635 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-b4xhk"]
Dec 10 00:58:04 crc kubenswrapper[4884]: I1210 00:58:04.574797 4884 generic.go:334] "Generic (PLEG): container finished" podID="ecf73cb4-4dc2-4a18-a755-3c083c7ea571" containerID="3a56a6c7e79612294e55ff1ee533ffe52a235f17150ceaa33151ecf19c81914a" exitCode=0
Dec 10 00:58:04 crc kubenswrapper[4884]: I1210 00:58:04.574876 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk" event={"ID":"ecf73cb4-4dc2-4a18-a755-3c083c7ea571","Type":"ContainerDied","Data":"3a56a6c7e79612294e55ff1ee533ffe52a235f17150ceaa33151ecf19c81914a"}
Dec 10 00:58:04 crc kubenswrapper[4884]: I1210 00:58:04.575191 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk" event={"ID":"ecf73cb4-4dc2-4a18-a755-3c083c7ea571","Type":"ContainerStarted","Data":"6cc0c8c1dce67e89204df137c21bc62a55c0f36b86484a41c0653e008a863584"}
Dec 10 00:58:04 crc kubenswrapper[4884]: I1210 00:58:04.577906 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"81a53e9a-3d38-45b5-9dc3-23d9140c945d","Type":"ContainerStarted","Data":"0439f44c36df374d2ca856ecc63c2651448f386c948b3a06c7a81330e7fc7de3"}
Dec 10 00:58:04 crc kubenswrapper[4884]: I1210 00:58:04.578082 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Dec 10 00:58:04 crc kubenswrapper[4884]: I1210 00:58:04.629318 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.852622301 podStartE2EDuration="5.629297866s" podCreationTimestamp="2025-12-10 00:57:59 +0000 UTC" firstStartedPulling="2025-12-10 00:58:00.411489494 +0000 UTC m=+1653.489446611" lastFinishedPulling="2025-12-10 00:58:04.188165059 +0000 UTC m=+1657.266122176" observedRunningTime="2025-12-10 00:58:04.617943718 +0000 UTC m=+1657.695900845" watchObservedRunningTime="2025-12-10 00:58:04.629297866 +0000 UTC m=+1657.707254973"
Dec 10 00:58:05 crc kubenswrapper[4884]: I1210 00:58:05.596621 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk" event={"ID":"ecf73cb4-4dc2-4a18-a755-3c083c7ea571","Type":"ContainerStarted","Data":"210db8a95b30465c4c9dc64069918bef6de762b64ebf7ecea2a087736e9ec629"}
Dec 10 00:58:05 crc kubenswrapper[4884]: I1210 00:58:05.596906 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk"
Dec 10 00:58:05 crc kubenswrapper[4884]: I1210 00:58:05.621822 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk" podStartSLOduration=2.621802303 podStartE2EDuration="2.621802303s" podCreationTimestamp="2025-12-10 00:58:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:58:05.618940636 +0000 UTC m=+1658.696897763" watchObservedRunningTime="2025-12-10 00:58:05.621802303 +0000 UTC m=+1658.699759420"
Dec 10 00:58:05 crc kubenswrapper[4884]: I1210 00:58:05.748933 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Dec 10 00:58:05 crc kubenswrapper[4884]: I1210 00:58:05.749184 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" containerName="nova-api-log" containerID="cri-o://6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a" gracePeriod=30
Dec 10 00:58:05 crc kubenswrapper[4884]: I1210 00:58:05.749278 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" containerName="nova-api-api" containerID="cri-o://18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2" gracePeriod=30
Dec 10 00:58:05 crc kubenswrapper[4884]: I1210 00:58:05.891779 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:58:06 crc kubenswrapper[4884]: I1210 00:58:06.616206 4884 generic.go:334] "Generic (PLEG): container finished" podID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" containerID="6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a" exitCode=143
Dec 10 00:58:06 crc kubenswrapper[4884]: I1210 00:58:06.616267 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"438ed569-d56e-4c42-ae94-dac8b30dc2fc","Type":"ContainerDied","Data":"6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a"}
Dec 10 00:58:06 crc kubenswrapper[4884]: I1210 00:58:06.616982 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="ceilometer-central-agent" containerID="cri-o://bf3d946e7b27bf0e91dd48759ef9c0b5c095788269dc101d7988ca49cb2616d6" gracePeriod=30
Dec 10 00:58:06 crc kubenswrapper[4884]: I1210 00:58:06.617118 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="ceilometer-notification-agent" containerID="cri-o://5f5c90e6ba2d318a8cf7640e468cd7986625cc284689e3a315dde2a7f720a56e" gracePeriod=30
Dec 10 00:58:06 crc kubenswrapper[4884]: I1210 00:58:06.617149 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="sg-core" containerID="cri-o://771f7ce02d543cafdf210b4cf4159e8672851d9eac734db93634765e766605cf" gracePeriod=30
Dec 10 00:58:06 crc kubenswrapper[4884]: I1210 00:58:06.617204 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="proxy-httpd" containerID="cri-o://0439f44c36df374d2ca856ecc63c2651448f386c948b3a06c7a81330e7fc7de3" gracePeriod=30
Dec 10 00:58:06 crc kubenswrapper[4884]: I1210 00:58:06.985096 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Dec 10 00:58:07 crc kubenswrapper[4884]: E1210 00:58:07.180016 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81a53e9a_3d38_45b5_9dc3_23d9140c945d.slice/crio-conmon-5f5c90e6ba2d318a8cf7640e468cd7986625cc284689e3a315dde2a7f720a56e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81a53e9a_3d38_45b5_9dc3_23d9140c945d.slice/crio-5f5c90e6ba2d318a8cf7640e468cd7986625cc284689e3a315dde2a7f720a56e.scope\": RecentStats: unable to find data in memory cache]"
Dec 10 00:58:07 crc kubenswrapper[4884]: I1210 00:58:07.634762 4884 generic.go:334] "Generic (PLEG): container finished" podID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerID="0439f44c36df374d2ca856ecc63c2651448f386c948b3a06c7a81330e7fc7de3" exitCode=0
Dec 10 00:58:07 crc kubenswrapper[4884]: I1210 00:58:07.634815 4884 generic.go:334] "Generic (PLEG): container finished" podID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerID="771f7ce02d543cafdf210b4cf4159e8672851d9eac734db93634765e766605cf" exitCode=2
Dec 10 00:58:07 crc kubenswrapper[4884]: I1210 00:58:07.634832 4884 generic.go:334] "Generic (PLEG): container finished" podID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerID="5f5c90e6ba2d318a8cf7640e468cd7986625cc284689e3a315dde2a7f720a56e" exitCode=0
Dec 10 00:58:07 crc kubenswrapper[4884]: I1210 00:58:07.634926 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"81a53e9a-3d38-45b5-9dc3-23d9140c945d","Type":"ContainerDied","Data":"0439f44c36df374d2ca856ecc63c2651448f386c948b3a06c7a81330e7fc7de3"}
Dec 10 00:58:07 crc kubenswrapper[4884]: I1210 00:58:07.634968 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"81a53e9a-3d38-45b5-9dc3-23d9140c945d","Type":"ContainerDied","Data":"771f7ce02d543cafdf210b4cf4159e8672851d9eac734db93634765e766605cf"}
Dec 10 00:58:07 crc kubenswrapper[4884]: I1210 00:58:07.634981 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"81a53e9a-3d38-45b5-9dc3-23d9140c945d","Type":"ContainerDied","Data":"5f5c90e6ba2d318a8cf7640e468cd7986625cc284689e3a315dde2a7f720a56e"}
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.508231 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.605330 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvfbz\" (UniqueName: \"kubernetes.io/projected/361cfe8a-f31e-49de-805e-f2d0b259d533-kube-api-access-hvfbz\") pod \"361cfe8a-f31e-49de-805e-f2d0b259d533\" (UID: \"361cfe8a-f31e-49de-805e-f2d0b259d533\") "
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.605535 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/361cfe8a-f31e-49de-805e-f2d0b259d533-combined-ca-bundle\") pod \"361cfe8a-f31e-49de-805e-f2d0b259d533\" (UID: \"361cfe8a-f31e-49de-805e-f2d0b259d533\") "
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.605585 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/361cfe8a-f31e-49de-805e-f2d0b259d533-config-data\") pod \"361cfe8a-f31e-49de-805e-f2d0b259d533\" (UID: \"361cfe8a-f31e-49de-805e-f2d0b259d533\") "
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.623303 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/361cfe8a-f31e-49de-805e-f2d0b259d533-kube-api-access-hvfbz" (OuterVolumeSpecName: "kube-api-access-hvfbz") pod "361cfe8a-f31e-49de-805e-f2d0b259d533" (UID: "361cfe8a-f31e-49de-805e-f2d0b259d533"). InnerVolumeSpecName "kube-api-access-hvfbz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.657786 4884 generic.go:334] "Generic (PLEG): container finished" podID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerID="bf3d946e7b27bf0e91dd48759ef9c0b5c095788269dc101d7988ca49cb2616d6" exitCode=0
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.657839 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"81a53e9a-3d38-45b5-9dc3-23d9140c945d","Type":"ContainerDied","Data":"bf3d946e7b27bf0e91dd48759ef9c0b5c095788269dc101d7988ca49cb2616d6"}
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.659400 4884 generic.go:334] "Generic (PLEG): container finished" podID="361cfe8a-f31e-49de-805e-f2d0b259d533" containerID="83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82" exitCode=137
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.659493 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"361cfe8a-f31e-49de-805e-f2d0b259d533","Type":"ContainerDied","Data":"83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82"}
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.659634 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"361cfe8a-f31e-49de-805e-f2d0b259d533","Type":"ContainerDied","Data":"ee8843f078c92e549630f171534ccbfd87e34d77b0f344fe7ef243d03e8a8342"}
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.659527 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.659725 4884 scope.go:117] "RemoveContainer" containerID="83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82"
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.662120 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/361cfe8a-f31e-49de-805e-f2d0b259d533-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "361cfe8a-f31e-49de-805e-f2d0b259d533" (UID: "361cfe8a-f31e-49de-805e-f2d0b259d533"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.681254 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/361cfe8a-f31e-49de-805e-f2d0b259d533-config-data" (OuterVolumeSpecName: "config-data") pod "361cfe8a-f31e-49de-805e-f2d0b259d533" (UID: "361cfe8a-f31e-49de-805e-f2d0b259d533"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.707768 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvfbz\" (UniqueName: \"kubernetes.io/projected/361cfe8a-f31e-49de-805e-f2d0b259d533-kube-api-access-hvfbz\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.707795 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/361cfe8a-f31e-49de-805e-f2d0b259d533-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.707804 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/361cfe8a-f31e-49de-805e-f2d0b259d533-config-data\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.756989 4884 scope.go:117] "RemoveContainer" containerID="83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82"
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.757354 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 10 00:58:08 crc kubenswrapper[4884]: E1210 00:58:08.757495 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82\": container with ID starting with 83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82 not found: ID does not exist" containerID="83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82"
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.757572 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82"} err="failed to get container status \"83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82\": rpc error: code = NotFound desc = could not find container \"83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82\": container with ID starting with 83313487f334008d560f9f5f45ce04e58b723420deb7be1a6565cb34e19d9a82 not found: ID does not exist"
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.910840 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-ceilometer-tls-certs\") pod \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") "
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.910914 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-combined-ca-bundle\") pod \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") "
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.911001 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/81a53e9a-3d38-45b5-9dc3-23d9140c945d-run-httpd\") pod \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") "
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.911522 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-sg-core-conf-yaml\") pod \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") "
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.911872 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-config-data\") pod \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") "
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.911455 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81a53e9a-3d38-45b5-9dc3-23d9140c945d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "81a53e9a-3d38-45b5-9dc3-23d9140c945d" (UID: "81a53e9a-3d38-45b5-9dc3-23d9140c945d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.912043 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qk8n2\" (UniqueName: \"kubernetes.io/projected/81a53e9a-3d38-45b5-9dc3-23d9140c945d-kube-api-access-qk8n2\") pod \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") "
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.912079 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-scripts\") pod \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") "
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.912141 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/81a53e9a-3d38-45b5-9dc3-23d9140c945d-log-httpd\") pod \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\" (UID: \"81a53e9a-3d38-45b5-9dc3-23d9140c945d\") "
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.912801 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/81a53e9a-3d38-45b5-9dc3-23d9140c945d-run-httpd\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.913063 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81a53e9a-3d38-45b5-9dc3-23d9140c945d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "81a53e9a-3d38-45b5-9dc3-23d9140c945d" (UID: "81a53e9a-3d38-45b5-9dc3-23d9140c945d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.915537 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-scripts" (OuterVolumeSpecName: "scripts") pod "81a53e9a-3d38-45b5-9dc3-23d9140c945d" (UID: "81a53e9a-3d38-45b5-9dc3-23d9140c945d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.916673 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81a53e9a-3d38-45b5-9dc3-23d9140c945d-kube-api-access-qk8n2" (OuterVolumeSpecName: "kube-api-access-qk8n2") pod "81a53e9a-3d38-45b5-9dc3-23d9140c945d" (UID: "81a53e9a-3d38-45b5-9dc3-23d9140c945d"). InnerVolumeSpecName "kube-api-access-qk8n2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.946080 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "81a53e9a-3d38-45b5-9dc3-23d9140c945d" (UID: "81a53e9a-3d38-45b5-9dc3-23d9140c945d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:58:08 crc kubenswrapper[4884]: I1210 00:58:08.981884 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "81a53e9a-3d38-45b5-9dc3-23d9140c945d" (UID: "81a53e9a-3d38-45b5-9dc3-23d9140c945d"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.014562 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qk8n2\" (UniqueName: \"kubernetes.io/projected/81a53e9a-3d38-45b5-9dc3-23d9140c945d-kube-api-access-qk8n2\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.014607 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-scripts\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.014627 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/81a53e9a-3d38-45b5-9dc3-23d9140c945d-log-httpd\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.014642 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.014654 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.016247 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "81a53e9a-3d38-45b5-9dc3-23d9140c945d" (UID: "81a53e9a-3d38-45b5-9dc3-23d9140c945d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.023381 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-config-data" (OuterVolumeSpecName: "config-data") pod "81a53e9a-3d38-45b5-9dc3-23d9140c945d" (UID: "81a53e9a-3d38-45b5-9dc3-23d9140c945d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.057473 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.083824 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.095881 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: E1210 00:58:09.096391 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="sg-core"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.096411 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="sg-core"
Dec 10 00:58:09 crc kubenswrapper[4884]: E1210 00:58:09.096450 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="proxy-httpd"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.096457 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="proxy-httpd"
Dec 10 00:58:09 crc kubenswrapper[4884]: E1210 00:58:09.096481 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="ceilometer-notification-agent"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.096487 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="ceilometer-notification-agent"
Dec 10 00:58:09 crc kubenswrapper[4884]: E1210 00:58:09.096501 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="361cfe8a-f31e-49de-805e-f2d0b259d533" containerName="nova-cell1-novncproxy-novncproxy"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.096507 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="361cfe8a-f31e-49de-805e-f2d0b259d533" containerName="nova-cell1-novncproxy-novncproxy"
Dec 10 00:58:09 crc kubenswrapper[4884]: E1210 00:58:09.096521 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="ceilometer-central-agent"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.096527 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="ceilometer-central-agent"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.096718 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="ceilometer-central-agent"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.096732 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="proxy-httpd"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.096747 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="361cfe8a-f31e-49de-805e-f2d0b259d533" containerName="nova-cell1-novncproxy-novncproxy"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.096755 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="sg-core"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.096773 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" containerName="ceilometer-notification-agent"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.097554 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.099926 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.099926 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.099968 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.106648 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.117387 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-config-data\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.117440 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81a53e9a-3d38-45b5-9dc3-23d9140c945d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.219159 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/453881c1-b2d7-4b85-a686-68711bc65917-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.219231 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/453881c1-b2d7-4b85-a686-68711bc65917-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.219364 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fk7vn\" (UniqueName: \"kubernetes.io/projected/453881c1-b2d7-4b85-a686-68711bc65917-kube-api-access-fk7vn\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.219407 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/453881c1-b2d7-4b85-a686-68711bc65917-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.219527 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/453881c1-b2d7-4b85-a686-68711bc65917-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.300201 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="361cfe8a-f31e-49de-805e-f2d0b259d533" path="/var/lib/kubelet/pods/361cfe8a-f31e-49de-805e-f2d0b259d533/volumes"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.310852 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.322578 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/453881c1-b2d7-4b85-a686-68711bc65917-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.322692 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/453881c1-b2d7-4b85-a686-68711bc65917-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.322885 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fk7vn\" (UniqueName: \"kubernetes.io/projected/453881c1-b2d7-4b85-a686-68711bc65917-kube-api-access-fk7vn\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.322946 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/453881c1-b2d7-4b85-a686-68711bc65917-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.323106 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/453881c1-b2d7-4b85-a686-68711bc65917-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.327195 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/453881c1-b2d7-4b85-a686-68711bc65917-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.328147 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/453881c1-b2d7-4b85-a686-68711bc65917-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.351925 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/453881c1-b2d7-4b85-a686-68711bc65917-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.355259 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fk7vn\" (UniqueName: \"kubernetes.io/projected/453881c1-b2d7-4b85-a686-68711bc65917-kube-api-access-fk7vn\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.370182 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/453881c1-b2d7-4b85-a686-68711bc65917-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"453881c1-b2d7-4b85-a686-68711bc65917\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.418557 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.424417 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/438ed569-d56e-4c42-ae94-dac8b30dc2fc-config-data\") pod \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") "
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.424536 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438ed569-d56e-4c42-ae94-dac8b30dc2fc-combined-ca-bundle\") pod \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") "
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.424683 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/438ed569-d56e-4c42-ae94-dac8b30dc2fc-logs\") pod \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") "
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.424759 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fv28f\" (UniqueName: \"kubernetes.io/projected/438ed569-d56e-4c42-ae94-dac8b30dc2fc-kube-api-access-fv28f\") pod \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\" (UID: \"438ed569-d56e-4c42-ae94-dac8b30dc2fc\") "
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.427571 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/438ed569-d56e-4c42-ae94-dac8b30dc2fc-logs" (OuterVolumeSpecName: "logs") pod "438ed569-d56e-4c42-ae94-dac8b30dc2fc" (UID: "438ed569-d56e-4c42-ae94-dac8b30dc2fc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.429224 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/438ed569-d56e-4c42-ae94-dac8b30dc2fc-kube-api-access-fv28f" (OuterVolumeSpecName: "kube-api-access-fv28f") pod "438ed569-d56e-4c42-ae94-dac8b30dc2fc" (UID: "438ed569-d56e-4c42-ae94-dac8b30dc2fc"). InnerVolumeSpecName "kube-api-access-fv28f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.481127 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/438ed569-d56e-4c42-ae94-dac8b30dc2fc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "438ed569-d56e-4c42-ae94-dac8b30dc2fc" (UID: "438ed569-d56e-4c42-ae94-dac8b30dc2fc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.481070 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/438ed569-d56e-4c42-ae94-dac8b30dc2fc-config-data" (OuterVolumeSpecName: "config-data") pod "438ed569-d56e-4c42-ae94-dac8b30dc2fc" (UID: "438ed569-d56e-4c42-ae94-dac8b30dc2fc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.528024 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/438ed569-d56e-4c42-ae94-dac8b30dc2fc-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.528060 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/438ed569-d56e-4c42-ae94-dac8b30dc2fc-logs\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.528071 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fv28f\" (UniqueName: \"kubernetes.io/projected/438ed569-d56e-4c42-ae94-dac8b30dc2fc-kube-api-access-fv28f\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.528082 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/438ed569-d56e-4c42-ae94-dac8b30dc2fc-config-data\") on node \"crc\" DevicePath \"\""
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.673590 4884 generic.go:334] "Generic (PLEG): container finished" podID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" containerID="18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2" exitCode=0
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.674051 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.674524 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"438ed569-d56e-4c42-ae94-dac8b30dc2fc","Type":"ContainerDied","Data":"18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2"}
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.674571 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"438ed569-d56e-4c42-ae94-dac8b30dc2fc","Type":"ContainerDied","Data":"4143cc5724ba885d8d126a002bd75934530d7f846e0500d2a9dbfc0e6fad20af"}
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.674589 4884 scope.go:117] "RemoveContainer" containerID="18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.684494 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"81a53e9a-3d38-45b5-9dc3-23d9140c945d","Type":"ContainerDied","Data":"19e8701a0f74121dbd7786ebd005e72768684181e177aae525792a7cf91388c6"}
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.684685 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.720965 4884 scope.go:117] "RemoveContainer" containerID="6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.722496 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.744561 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.756418 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.760897 4884 scope.go:117] "RemoveContainer" containerID="18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2"
Dec 10 00:58:09 crc kubenswrapper[4884]: E1210 00:58:09.761366 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2\": container with ID starting with 18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2 not found: ID does not exist" containerID="18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.761407 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2"} err="failed to get container status \"18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2\": rpc error: code = NotFound desc = could not find container \"18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2\": container with ID starting with 18900e352fc9620dc2af739654fd851f171cffe6c4d23506950d3904593475e2 not found: ID does not exist"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.761533 4884 scope.go:117] "RemoveContainer" containerID="6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a"
Dec 10 00:58:09 crc kubenswrapper[4884]: E1210 00:58:09.761972 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a\": container with ID starting with 6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a not found: ID does not exist" containerID="6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.762013 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a"} err="failed to get container status \"6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a\": rpc error: code = NotFound desc = could not find container \"6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a\": container with ID starting with 6be7947545e05e8d8deef7fba20ae4dc137243a9c39570dba99128458b51052a not found: ID does not exist"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.762039 4884 scope.go:117] "RemoveContainer" containerID="0439f44c36df374d2ca856ecc63c2651448f386c948b3a06c7a81330e7fc7de3"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.772799 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.794247 4884 scope.go:117] "RemoveContainer" containerID="771f7ce02d543cafdf210b4cf4159e8672851d9eac734db93634765e766605cf"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.798233 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: E1210 00:58:09.798746 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" containerName="nova-api-log"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.798762 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" containerName="nova-api-log"
Dec 10 00:58:09 crc kubenswrapper[4884]: E1210 00:58:09.798790 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" containerName="nova-api-api"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.798796 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" containerName="nova-api-api"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.799020 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" containerName="nova-api-api"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.799058 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" containerName="nova-api-log"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.800182 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.803024 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.803343 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.807013 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.831119 4884 scope.go:117] "RemoveContainer" containerID="5f5c90e6ba2d318a8cf7640e468cd7986625cc284689e3a315dde2a7f720a56e"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.834294 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.843788 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.846626 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.848084 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.848733 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.849567 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.861133 4884 scope.go:117] "RemoveContainer" containerID="bf3d946e7b27bf0e91dd48759ef9c0b5c095788269dc101d7988ca49cb2616d6"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.880329 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.905801 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.935149 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.935196 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37468774-eb34-4fb0-b207-78ba8d8009b2-logs\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.935234 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.935353 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2l5dg\" (UniqueName: \"kubernetes.io/projected/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-kube-api-access-2l5dg\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.935579 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-log-httpd\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.935650 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.935705 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-public-tls-certs\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.935853 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-config-data\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.935910 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-scripts\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.935984 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.936066 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-run-httpd\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.936101 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-config-data\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.936130 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-internal-tls-certs\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0"
Dec 10 00:58:09 crc kubenswrapper[4884]: I1210 00:58:09.936232 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flxrt\" (UniqueName: \"kubernetes.io/projected/37468774-eb34-4fb0-b207-78ba8d8009b2-kube-api-access-flxrt\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.037831 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-config-data\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.038247 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-scripts\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.038300 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.038336 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-run-httpd\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.038365 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-config-data\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.038395 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-internal-tls-certs\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.038678 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flxrt\" (UniqueName: \"kubernetes.io/projected/37468774-eb34-4fb0-b207-78ba8d8009b2-kube-api-access-flxrt\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.039247 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-run-httpd\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.039408 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.039532 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37468774-eb34-4fb0-b207-78ba8d8009b2-logs\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.039645 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.039710 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2l5dg\" (UniqueName: \"kubernetes.io/projected/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-kube-api-access-2l5dg\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0"
Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.039822 4884 reconciler_common.go:218]
"operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-log-httpd\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.039865 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.039917 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-public-tls-certs\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.041286 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37468774-eb34-4fb0-b207-78ba8d8009b2-logs\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.042683 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-log-httpd\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.044136 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.044415 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.044654 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-config-data\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.045308 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-scripts\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.046579 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-public-tls-certs\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.053214 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.054077 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-config-data\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.061044 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.061230 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-internal-tls-certs\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.062864 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2l5dg\" (UniqueName: \"kubernetes.io/projected/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-kube-api-access-2l5dg\") pod \"ceilometer-0\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") " pod="openstack/ceilometer-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.065333 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flxrt\" (UniqueName: \"kubernetes.io/projected/37468774-eb34-4fb0-b207-78ba8d8009b2-kube-api-access-flxrt\") pod \"nova-api-0\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " pod="openstack/nova-api-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.121851 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.218304 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.607283 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 00:58:10 crc kubenswrapper[4884]: W1210 00:58:10.623331 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37468774_eb34_4fb0_b207_78ba8d8009b2.slice/crio-1e41d54e9928b8b18ec5eed65f8e430e3db08dff744461d1e0b7e84b39902208 WatchSource:0}: Error finding container 1e41d54e9928b8b18ec5eed65f8e430e3db08dff744461d1e0b7e84b39902208: Status 404 returned error can't find the container with id 1e41d54e9928b8b18ec5eed65f8e430e3db08dff744461d1e0b7e84b39902208 Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.702597 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"453881c1-b2d7-4b85-a686-68711bc65917","Type":"ContainerStarted","Data":"afa52cb7f5854df0d67e6bb9aeb79b5516c6ce2c5a2e7affb4b635a8a5b07274"} Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.702637 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"453881c1-b2d7-4b85-a686-68711bc65917","Type":"ContainerStarted","Data":"e4e1951c8d821ba2a924dfaa5ae7a90c7c5f7108260aa3ba1cda3a5c8f56848b"} Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.714767 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"37468774-eb34-4fb0-b207-78ba8d8009b2","Type":"ContainerStarted","Data":"1e41d54e9928b8b18ec5eed65f8e430e3db08dff744461d1e0b7e84b39902208"} Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.726548 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.726528124 podStartE2EDuration="1.726528124s" podCreationTimestamp="2025-12-10 00:58:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:58:10.718206618 +0000 UTC m=+1663.796163735" watchObservedRunningTime="2025-12-10 00:58:10.726528124 +0000 UTC m=+1663.804485241" Dec 10 00:58:10 crc kubenswrapper[4884]: I1210 00:58:10.803087 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:58:10 crc kubenswrapper[4884]: W1210 00:58:10.816958 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6829ec3_2b83_4cf5_a39a_b48697dac2b9.slice/crio-486ea03e46a26a4b9e6bd31696977fb533cabc9f7b118678b9d847457d84c302 WatchSource:0}: Error finding container 486ea03e46a26a4b9e6bd31696977fb533cabc9f7b118678b9d847457d84c302: Status 404 returned error can't find the container with id 486ea03e46a26a4b9e6bd31696977fb533cabc9f7b118678b9d847457d84c302 Dec 10 00:58:11 crc kubenswrapper[4884]: I1210 00:58:11.298245 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="438ed569-d56e-4c42-ae94-dac8b30dc2fc" path="/var/lib/kubelet/pods/438ed569-d56e-4c42-ae94-dac8b30dc2fc/volumes" Dec 10 00:58:11 crc kubenswrapper[4884]: I1210 00:58:11.299104 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81a53e9a-3d38-45b5-9dc3-23d9140c945d" path="/var/lib/kubelet/pods/81a53e9a-3d38-45b5-9dc3-23d9140c945d/volumes" Dec 10 00:58:11 crc kubenswrapper[4884]: I1210 00:58:11.736994 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-0" event={"ID":"37468774-eb34-4fb0-b207-78ba8d8009b2","Type":"ContainerStarted","Data":"bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77"} Dec 10 00:58:11 crc kubenswrapper[4884]: I1210 00:58:11.743562 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"37468774-eb34-4fb0-b207-78ba8d8009b2","Type":"ContainerStarted","Data":"f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a"} Dec 10 00:58:11 crc kubenswrapper[4884]: I1210 00:58:11.762796 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6829ec3-2b83-4cf5-a39a-b48697dac2b9","Type":"ContainerStarted","Data":"486ea03e46a26a4b9e6bd31696977fb533cabc9f7b118678b9d847457d84c302"} Dec 10 00:58:11 crc kubenswrapper[4884]: I1210 00:58:11.780017 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.779996222 podStartE2EDuration="2.779996222s" podCreationTimestamp="2025-12-10 00:58:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:58:11.767063433 +0000 UTC m=+1664.845020560" watchObservedRunningTime="2025-12-10 00:58:11.779996222 +0000 UTC m=+1664.857953339" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.577747 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.708474 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6qjxr\" (UniqueName: \"kubernetes.io/projected/502dbb40-c907-4f1f-b590-565e665bf0fc-kube-api-access-6qjxr\") pod \"502dbb40-c907-4f1f-b590-565e665bf0fc\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.708580 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-nova-metadata-tls-certs\") pod \"502dbb40-c907-4f1f-b590-565e665bf0fc\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.708639 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-config-data\") pod \"502dbb40-c907-4f1f-b590-565e665bf0fc\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.708723 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-combined-ca-bundle\") pod \"502dbb40-c907-4f1f-b590-565e665bf0fc\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.708762 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/502dbb40-c907-4f1f-b590-565e665bf0fc-logs\") pod \"502dbb40-c907-4f1f-b590-565e665bf0fc\" (UID: \"502dbb40-c907-4f1f-b590-565e665bf0fc\") " Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.709181 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/502dbb40-c907-4f1f-b590-565e665bf0fc-logs" (OuterVolumeSpecName: "logs") pod 
"502dbb40-c907-4f1f-b590-565e665bf0fc" (UID: "502dbb40-c907-4f1f-b590-565e665bf0fc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.709695 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/502dbb40-c907-4f1f-b590-565e665bf0fc-logs\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.714410 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/502dbb40-c907-4f1f-b590-565e665bf0fc-kube-api-access-6qjxr" (OuterVolumeSpecName: "kube-api-access-6qjxr") pod "502dbb40-c907-4f1f-b590-565e665bf0fc" (UID: "502dbb40-c907-4f1f-b590-565e665bf0fc"). InnerVolumeSpecName "kube-api-access-6qjxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.742015 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-config-data" (OuterVolumeSpecName: "config-data") pod "502dbb40-c907-4f1f-b590-565e665bf0fc" (UID: "502dbb40-c907-4f1f-b590-565e665bf0fc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.743588 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "502dbb40-c907-4f1f-b590-565e665bf0fc" (UID: "502dbb40-c907-4f1f-b590-565e665bf0fc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.771218 4884 generic.go:334] "Generic (PLEG): container finished" podID="502dbb40-c907-4f1f-b590-565e665bf0fc" containerID="5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675" exitCode=137 Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.772257 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.772780 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"502dbb40-c907-4f1f-b590-565e665bf0fc","Type":"ContainerDied","Data":"5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675"} Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.772811 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"502dbb40-c907-4f1f-b590-565e665bf0fc","Type":"ContainerDied","Data":"90ef9935407730e9a5a131865120042c0a4fc43604b91839f4b3ba104f336c5a"} Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.772827 4884 scope.go:117] "RemoveContainer" containerID="5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.782554 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "502dbb40-c907-4f1f-b590-565e665bf0fc" (UID: "502dbb40-c907-4f1f-b590-565e665bf0fc"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.811674 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6qjxr\" (UniqueName: \"kubernetes.io/projected/502dbb40-c907-4f1f-b590-565e665bf0fc-kube-api-access-6qjxr\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.811709 4884 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.811719 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.811728 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/502dbb40-c907-4f1f-b590-565e665bf0fc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.917946 4884 scope.go:117] "RemoveContainer" containerID="b3a5123a57750dfacd3e448554324a95b65852bd906dc6fe5843136119479902" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.942792 4884 scope.go:117] "RemoveContainer" containerID="5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675" Dec 10 00:58:12 crc kubenswrapper[4884]: E1210 00:58:12.943221 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675\": container with ID starting with 5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675 not found: ID does not exist" containerID="5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.943263 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675"} err="failed to get container status \"5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675\": rpc error: code = NotFound desc = could not find container \"5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675\": container with ID starting with 5dc5ff59f9eabce289fcf7101e56d71f4ee30793886c94d64ab859cbc02e1675 not found: ID does not exist" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.943283 4884 scope.go:117] "RemoveContainer" containerID="b3a5123a57750dfacd3e448554324a95b65852bd906dc6fe5843136119479902" Dec 10 00:58:12 crc kubenswrapper[4884]: E1210 00:58:12.943635 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3a5123a57750dfacd3e448554324a95b65852bd906dc6fe5843136119479902\": container with ID starting with b3a5123a57750dfacd3e448554324a95b65852bd906dc6fe5843136119479902 not found: ID does not exist" containerID="b3a5123a57750dfacd3e448554324a95b65852bd906dc6fe5843136119479902" Dec 10 00:58:12 crc kubenswrapper[4884]: I1210 00:58:12.943675 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3a5123a57750dfacd3e448554324a95b65852bd906dc6fe5843136119479902"} err="failed to get container status \"b3a5123a57750dfacd3e448554324a95b65852bd906dc6fe5843136119479902\": rpc 
Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.105371 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.113860 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.142736 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Dec 10 00:58:13 crc kubenswrapper[4884]: E1210 00:58:13.143284 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="502dbb40-c907-4f1f-b590-565e665bf0fc" containerName="nova-metadata-metadata"
Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.143306 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="502dbb40-c907-4f1f-b590-565e665bf0fc" containerName="nova-metadata-metadata"
Dec 10 00:58:13 crc kubenswrapper[4884]: E1210 00:58:13.143331 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="502dbb40-c907-4f1f-b590-565e665bf0fc" containerName="nova-metadata-log"
Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.143338 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="502dbb40-c907-4f1f-b590-565e665bf0fc" containerName="nova-metadata-log"
Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.143568 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="502dbb40-c907-4f1f-b590-565e665bf0fc" containerName="nova-metadata-log"
Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.143598 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="502dbb40-c907-4f1f-b590-565e665bf0fc" containerName="nova-metadata-metadata"
Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.144760 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.154066 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.204005 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.204067 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.219252 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b352746-53a0-48f5-b381-1f286688c77b-logs\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.219423 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-config-data\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.219584 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.219642 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.219724 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfgl9\" (UniqueName: \"kubernetes.io/projected/3b352746-53a0-48f5-b381-1f286688c77b-kube-api-access-zfgl9\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.303242 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="502dbb40-c907-4f1f-b590-565e665bf0fc" path="/var/lib/kubelet/pods/502dbb40-c907-4f1f-b590-565e665bf0fc/volumes" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.321189 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b352746-53a0-48f5-b381-1f286688c77b-logs\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.321350 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-config-data\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.321427 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.321519 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.321589 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b352746-53a0-48f5-b381-1f286688c77b-logs\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.321594 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfgl9\" (UniqueName: \"kubernetes.io/projected/3b352746-53a0-48f5-b381-1f286688c77b-kube-api-access-zfgl9\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.325576 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-config-data\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.325883 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.326091 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.338616 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfgl9\" (UniqueName: \"kubernetes.io/projected/3b352746-53a0-48f5-b381-1f286688c77b-kube-api-access-zfgl9\") pod \"nova-metadata-0\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.499623 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.517471 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.630211 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-fp9tb"] Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.631013 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" podUID="a1218ce1-2ed9-4191-9965-5c7c8ef9e842" containerName="dnsmasq-dns" containerID="cri-o://72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6" gracePeriod=10 Dec 10 00:58:13 crc kubenswrapper[4884]: I1210 00:58:13.821550 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6829ec3-2b83-4cf5-a39a-b48697dac2b9","Type":"ContainerStarted","Data":"e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0"} Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.189526 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.258754 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.349772 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-ovsdbserver-sb\") pod \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.349887 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qpdm\" (UniqueName: \"kubernetes.io/projected/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-kube-api-access-4qpdm\") pod \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.349938 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-dns-swift-storage-0\") pod \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.350027 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-ovsdbserver-nb\") pod \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.350073 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-config\") pod \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.350154 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-dns-svc\") pod \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\" (UID: \"a1218ce1-2ed9-4191-9965-5c7c8ef9e842\") " Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.375121 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-kube-api-access-4qpdm" (OuterVolumeSpecName: "kube-api-access-4qpdm") pod "a1218ce1-2ed9-4191-9965-5c7c8ef9e842" (UID: "a1218ce1-2ed9-4191-9965-5c7c8ef9e842"). InnerVolumeSpecName "kube-api-access-4qpdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.423822 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.452811 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qpdm\" (UniqueName: \"kubernetes.io/projected/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-kube-api-access-4qpdm\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.596573 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a1218ce1-2ed9-4191-9965-5c7c8ef9e842" (UID: "a1218ce1-2ed9-4191-9965-5c7c8ef9e842"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.632612 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a1218ce1-2ed9-4191-9965-5c7c8ef9e842" (UID: "a1218ce1-2ed9-4191-9965-5c7c8ef9e842"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.634399 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a1218ce1-2ed9-4191-9965-5c7c8ef9e842" (UID: "a1218ce1-2ed9-4191-9965-5c7c8ef9e842"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.641960 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-config" (OuterVolumeSpecName: "config") pod "a1218ce1-2ed9-4191-9965-5c7c8ef9e842" (UID: "a1218ce1-2ed9-4191-9965-5c7c8ef9e842"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.645834 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a1218ce1-2ed9-4191-9965-5c7c8ef9e842" (UID: "a1218ce1-2ed9-4191-9965-5c7c8ef9e842"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.681734 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.682264 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.682288 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.682304 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.682318 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a1218ce1-2ed9-4191-9965-5c7c8ef9e842-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.850606 4884 generic.go:334] "Generic (PLEG): container finished" podID="a1218ce1-2ed9-4191-9965-5c7c8ef9e842" containerID="72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6" exitCode=0 Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.851621 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.851998 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" event={"ID":"a1218ce1-2ed9-4191-9965-5c7c8ef9e842","Type":"ContainerDied","Data":"72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6"} Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.852033 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-fp9tb" event={"ID":"a1218ce1-2ed9-4191-9965-5c7c8ef9e842","Type":"ContainerDied","Data":"ed2ac6de9929fbdf13c4615024f477211f102c709d2bd8475de1ba558b031edc"} Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.852049 4884 scope.go:117] "RemoveContainer" containerID="72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.858859 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6829ec3-2b83-4cf5-a39a-b48697dac2b9","Type":"ContainerStarted","Data":"46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec"} Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.863925 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3b352746-53a0-48f5-b381-1f286688c77b","Type":"ContainerStarted","Data":"962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e"} Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.863984 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3b352746-53a0-48f5-b381-1f286688c77b","Type":"ContainerStarted","Data":"46527aef1b8f0bf6240c2b902ab7e0f390c2972396c8cfdbf5e0e969b0478091"} Dec 10 00:58:14 crc 
kubenswrapper[4884]: I1210 00:58:14.884980 4884 scope.go:117] "RemoveContainer" containerID="56a97e566bd52088cf2aa5f52aaa58f5e5de37c6ca138f38aef12405e03a8ba9" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.917835 4884 scope.go:117] "RemoveContainer" containerID="72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6" Dec 10 00:58:14 crc kubenswrapper[4884]: E1210 00:58:14.919878 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6\": container with ID starting with 72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6 not found: ID does not exist" containerID="72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.919921 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6"} err="failed to get container status \"72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6\": rpc error: code = NotFound desc = could not find container \"72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6\": container with ID starting with 72d7e8acac7370befb02540faee4ee662ccd1f161bf6abb1d6e930d39a957dc6 not found: ID does not exist" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.919951 4884 scope.go:117] "RemoveContainer" containerID="56a97e566bd52088cf2aa5f52aaa58f5e5de37c6ca138f38aef12405e03a8ba9" Dec 10 00:58:14 crc kubenswrapper[4884]: E1210 00:58:14.920304 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56a97e566bd52088cf2aa5f52aaa58f5e5de37c6ca138f38aef12405e03a8ba9\": container with ID starting with 56a97e566bd52088cf2aa5f52aaa58f5e5de37c6ca138f38aef12405e03a8ba9 not found: ID does not exist" containerID="56a97e566bd52088cf2aa5f52aaa58f5e5de37c6ca138f38aef12405e03a8ba9" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.920366 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56a97e566bd52088cf2aa5f52aaa58f5e5de37c6ca138f38aef12405e03a8ba9"} err="failed to get container status \"56a97e566bd52088cf2aa5f52aaa58f5e5de37c6ca138f38aef12405e03a8ba9\": rpc error: code = NotFound desc = could not find container \"56a97e566bd52088cf2aa5f52aaa58f5e5de37c6ca138f38aef12405e03a8ba9\": container with ID starting with 56a97e566bd52088cf2aa5f52aaa58f5e5de37c6ca138f38aef12405e03a8ba9 not found: ID does not exist" Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.941183 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-fp9tb"] Dec 10 00:58:14 crc kubenswrapper[4884]: I1210 00:58:14.954350 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-fp9tb"] Dec 10 00:58:15 crc kubenswrapper[4884]: I1210 00:58:15.287489 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:58:15 crc kubenswrapper[4884]: E1210 00:58:15.288157 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:58:15 crc kubenswrapper[4884]: I1210 00:58:15.307727 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1218ce1-2ed9-4191-9965-5c7c8ef9e842" path="/var/lib/kubelet/pods/a1218ce1-2ed9-4191-9965-5c7c8ef9e842/volumes" Dec 10 00:58:15 crc kubenswrapper[4884]: I1210 00:58:15.879598 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6829ec3-2b83-4cf5-a39a-b48697dac2b9","Type":"ContainerStarted","Data":"3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d"} Dec 10 00:58:15 crc kubenswrapper[4884]: I1210 00:58:15.881091 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3b352746-53a0-48f5-b381-1f286688c77b","Type":"ContainerStarted","Data":"eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a"} Dec 10 00:58:15 crc kubenswrapper[4884]: I1210 00:58:15.916195 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.916161283 podStartE2EDuration="2.916161283s" podCreationTimestamp="2025-12-10 00:58:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:58:15.904785075 +0000 UTC m=+1668.982742202" watchObservedRunningTime="2025-12-10 00:58:15.916161283 +0000 UTC m=+1668.994118430" Dec 10 00:58:16 crc kubenswrapper[4884]: I1210 00:58:16.892565 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6829ec3-2b83-4cf5-a39a-b48697dac2b9","Type":"ContainerStarted","Data":"cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567"} Dec 10 00:58:16 crc kubenswrapper[4884]: I1210 00:58:16.892880 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 10 00:58:16 crc kubenswrapper[4884]: I1210 00:58:16.923630 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.807375343 podStartE2EDuration="7.923605706s" podCreationTimestamp="2025-12-10 00:58:09 +0000 UTC" firstStartedPulling="2025-12-10 00:58:10.818791482 +0000 UTC m=+1663.896748599" lastFinishedPulling="2025-12-10 00:58:15.935021845 +0000 UTC m=+1669.012978962" observedRunningTime="2025-12-10 00:58:16.913523593 +0000 UTC m=+1669.991480750" watchObservedRunningTime="2025-12-10 00:58:16.923605706 +0000 UTC m=+1670.001562833" Dec 10 00:58:18 crc kubenswrapper[4884]: I1210 00:58:18.517975 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 00:58:18 crc kubenswrapper[4884]: I1210 00:58:18.518368 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 00:58:19 crc kubenswrapper[4884]: I1210 00:58:19.420355 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:58:19 crc kubenswrapper[4884]: I1210 00:58:19.440131 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:58:19 crc kubenswrapper[4884]: I1210 00:58:19.961492 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.122391 4884 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.122508 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.240473 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-4st4z"] Dec 10 00:58:20 crc kubenswrapper[4884]: E1210 00:58:20.241018 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1218ce1-2ed9-4191-9965-5c7c8ef9e842" containerName="dnsmasq-dns" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.241040 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1218ce1-2ed9-4191-9965-5c7c8ef9e842" containerName="dnsmasq-dns" Dec 10 00:58:20 crc kubenswrapper[4884]: E1210 00:58:20.241071 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1218ce1-2ed9-4191-9965-5c7c8ef9e842" containerName="init" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.241080 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1218ce1-2ed9-4191-9965-5c7c8ef9e842" containerName="init" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.241379 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1218ce1-2ed9-4191-9965-5c7c8ef9e842" containerName="dnsmasq-dns" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.242354 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.249588 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.249758 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.282691 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-4st4z"] Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.330694 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wr5sr\" (UniqueName: \"kubernetes.io/projected/7ff4c708-d45a-4f64-aab0-db7234765ea8-kube-api-access-wr5sr\") pod \"nova-cell1-cell-mapping-4st4z\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.330761 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4st4z\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.330887 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-scripts\") pod \"nova-cell1-cell-mapping-4st4z\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.331133 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-config-data\") 
pod \"nova-cell1-cell-mapping-4st4z\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.432377 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-scripts\") pod \"nova-cell1-cell-mapping-4st4z\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.432737 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-config-data\") pod \"nova-cell1-cell-mapping-4st4z\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.432820 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wr5sr\" (UniqueName: \"kubernetes.io/projected/7ff4c708-d45a-4f64-aab0-db7234765ea8-kube-api-access-wr5sr\") pod \"nova-cell1-cell-mapping-4st4z\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.433054 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4st4z\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.442161 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-4st4z\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.442253 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-config-data\") pod \"nova-cell1-cell-mapping-4st4z\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.442260 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-scripts\") pod \"nova-cell1-cell-mapping-4st4z\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.463132 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wr5sr\" (UniqueName: \"kubernetes.io/projected/7ff4c708-d45a-4f64-aab0-db7234765ea8-kube-api-access-wr5sr\") pod \"nova-cell1-cell-mapping-4st4z\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:20 crc kubenswrapper[4884]: I1210 00:58:20.572170 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:21 crc kubenswrapper[4884]: I1210 00:58:21.082313 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-4st4z"] Dec 10 00:58:21 crc kubenswrapper[4884]: I1210 00:58:21.134591 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="37468774-eb34-4fb0-b207-78ba8d8009b2" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.238:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 00:58:21 crc kubenswrapper[4884]: I1210 00:58:21.134623 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="37468774-eb34-4fb0-b207-78ba8d8009b2" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.238:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 10 00:58:21 crc kubenswrapper[4884]: I1210 00:58:21.954503 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4st4z" event={"ID":"7ff4c708-d45a-4f64-aab0-db7234765ea8","Type":"ContainerStarted","Data":"738972214ea3943c4c6a6ede34cb37e87ed21dce7a19d4f2dd9b23f72c2a8981"} Dec 10 00:58:21 crc kubenswrapper[4884]: I1210 00:58:21.954788 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4st4z" event={"ID":"7ff4c708-d45a-4f64-aab0-db7234765ea8","Type":"ContainerStarted","Data":"3560105bf831df2a5d33c8d0119f491c7d9b513d6c5054d5002aaeb5b9487750"} Dec 10 00:58:21 crc kubenswrapper[4884]: I1210 00:58:21.985289 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-4st4z" podStartSLOduration=1.985271149 podStartE2EDuration="1.985271149s" podCreationTimestamp="2025-12-10 00:58:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:58:21.977546221 +0000 UTC m=+1675.055503338" watchObservedRunningTime="2025-12-10 00:58:21.985271149 +0000 UTC m=+1675.063228256" Dec 10 00:58:23 crc kubenswrapper[4884]: I1210 00:58:23.517858 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 10 00:58:23 crc kubenswrapper[4884]: I1210 00:58:23.518130 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 10 00:58:24 crc kubenswrapper[4884]: I1210 00:58:24.569628 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="3b352746-53a0-48f5-b381-1f286688c77b" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.240:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 00:58:24 crc kubenswrapper[4884]: I1210 00:58:24.569724 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="3b352746-53a0-48f5-b381-1f286688c77b" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.240:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 00:58:24 crc kubenswrapper[4884]: I1210 00:58:24.915870 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Dec 10 00:58:24 crc kubenswrapper[4884]: I1210 00:58:24.994632 4884 generic.go:334] "Generic (PLEG): container finished" podID="42adb60e-d792-453f-a207-41beb4c9fa48" containerID="a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064" exitCode=137 Dec 10 00:58:24 crc kubenswrapper[4884]: I1210 00:58:24.994672 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"42adb60e-d792-453f-a207-41beb4c9fa48","Type":"ContainerDied","Data":"a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064"} Dec 10 00:58:24 crc kubenswrapper[4884]: I1210 00:58:24.994698 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"42adb60e-d792-453f-a207-41beb4c9fa48","Type":"ContainerDied","Data":"01823eb97217f5d2ea4f55e9992ab0e7c52f976916f1e10fe80cd58ab66f35ab"} Dec 10 00:58:24 crc kubenswrapper[4884]: I1210 00:58:24.994715 4884 scope.go:117] "RemoveContainer" containerID="a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064" Dec 10 00:58:24 crc kubenswrapper[4884]: I1210 00:58:24.994725 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.017401 4884 scope.go:117] "RemoveContainer" containerID="fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.044164 4884 scope.go:117] "RemoveContainer" containerID="87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.052863 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-combined-ca-bundle\") pod \"42adb60e-d792-453f-a207-41beb4c9fa48\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.053107 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkblk\" (UniqueName: \"kubernetes.io/projected/42adb60e-d792-453f-a207-41beb4c9fa48-kube-api-access-hkblk\") pod \"42adb60e-d792-453f-a207-41beb4c9fa48\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.053331 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-config-data\") pod \"42adb60e-d792-453f-a207-41beb4c9fa48\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.053409 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-scripts\") pod \"42adb60e-d792-453f-a207-41beb4c9fa48\" (UID: \"42adb60e-d792-453f-a207-41beb4c9fa48\") " Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.066663 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42adb60e-d792-453f-a207-41beb4c9fa48-kube-api-access-hkblk" (OuterVolumeSpecName: "kube-api-access-hkblk") pod "42adb60e-d792-453f-a207-41beb4c9fa48" (UID: "42adb60e-d792-453f-a207-41beb4c9fa48"). InnerVolumeSpecName "kube-api-access-hkblk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.078799 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-scripts" (OuterVolumeSpecName: "scripts") pod "42adb60e-d792-453f-a207-41beb4c9fa48" (UID: "42adb60e-d792-453f-a207-41beb4c9fa48"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.099853 4884 scope.go:117] "RemoveContainer" containerID="345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.161773 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.161992 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkblk\" (UniqueName: \"kubernetes.io/projected/42adb60e-d792-453f-a207-41beb4c9fa48-kube-api-access-hkblk\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.200593 4884 scope.go:117] "RemoveContainer" containerID="a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064" Dec 10 00:58:25 crc kubenswrapper[4884]: E1210 00:58:25.201853 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064\": container with ID starting with a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064 not found: ID does not exist" containerID="a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.201885 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064"} err="failed to get container status \"a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064\": rpc error: code = NotFound desc = could not find container \"a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064\": container with ID starting with a98e8e72a23fc75fa7f16c9d8e5587f2fe0751d10702a48c7102878934c65064 not found: ID does not exist" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.201906 4884 scope.go:117] "RemoveContainer" containerID="fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27" Dec 10 00:58:25 crc kubenswrapper[4884]: E1210 00:58:25.205057 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27\": container with ID starting with fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27 not found: ID does not exist" containerID="fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.205116 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27"} err="failed to get container status \"fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27\": rpc error: code = NotFound desc = could not find container \"fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27\": container with ID 
starting with fef37e9a52bd63a6ebae4e0e548612c55aecccb5c5738faee9fff1b2cd6e8f27 not found: ID does not exist" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.205152 4884 scope.go:117] "RemoveContainer" containerID="87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b" Dec 10 00:58:25 crc kubenswrapper[4884]: E1210 00:58:25.205506 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b\": container with ID starting with 87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b not found: ID does not exist" containerID="87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.205536 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b"} err="failed to get container status \"87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b\": rpc error: code = NotFound desc = could not find container \"87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b\": container with ID starting with 87b9f560727be9ab9c6eb1ee095821324353f3d521a06bb7cce582d16c8fa08b not found: ID does not exist" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.205555 4884 scope.go:117] "RemoveContainer" containerID="345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45" Dec 10 00:58:25 crc kubenswrapper[4884]: E1210 00:58:25.206111 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45\": container with ID starting with 345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45 not found: ID does not exist" containerID="345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.206195 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45"} err="failed to get container status \"345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45\": rpc error: code = NotFound desc = could not find container \"345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45\": container with ID starting with 345311bffc73f17ebb65ff8f73c0510e302e96e29550834eba554ce1126b6f45 not found: ID does not exist" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.224588 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "42adb60e-d792-453f-a207-41beb4c9fa48" (UID: "42adb60e-d792-453f-a207-41beb4c9fa48"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.248367 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-config-data" (OuterVolumeSpecName: "config-data") pod "42adb60e-d792-453f-a207-41beb4c9fa48" (UID: "42adb60e-d792-453f-a207-41beb4c9fa48"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.264074 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.264120 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42adb60e-d792-453f-a207-41beb4c9fa48-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.334037 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.344126 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.359367 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Dec 10 00:58:25 crc kubenswrapper[4884]: E1210 00:58:25.359907 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-evaluator" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.359931 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-evaluator" Dec 10 00:58:25 crc kubenswrapper[4884]: E1210 00:58:25.359965 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-notifier" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.359975 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-notifier" Dec 10 00:58:25 crc kubenswrapper[4884]: E1210 00:58:25.359995 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-api" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.360002 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-api" Dec 10 00:58:25 crc kubenswrapper[4884]: E1210 00:58:25.360015 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-listener" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.360024 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-listener" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.360259 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-notifier" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.360291 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-evaluator" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.360306 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-api" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.360320 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" containerName="aodh-listener" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.362633 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.369272 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.371255 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.371331 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.371466 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-pd287" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.371782 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.378482 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.467996 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-config-data\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.468229 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-combined-ca-bundle\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.468303 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-scripts\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.468339 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-internal-tls-certs\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.468357 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdfk8\" (UniqueName: \"kubernetes.io/projected/81aa3793-6c68-4ffc-b413-293b268d2ccd-kube-api-access-fdfk8\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.468405 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-public-tls-certs\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.571632 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-scripts\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc 
kubenswrapper[4884]: I1210 00:58:25.571761 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-internal-tls-certs\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.571799 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdfk8\" (UniqueName: \"kubernetes.io/projected/81aa3793-6c68-4ffc-b413-293b268d2ccd-kube-api-access-fdfk8\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.571865 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-public-tls-certs\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.572014 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-config-data\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.572074 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-combined-ca-bundle\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.576513 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-public-tls-certs\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.577065 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-scripts\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.577639 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-combined-ca-bundle\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.580469 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-config-data\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.582105 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/81aa3793-6c68-4ffc-b413-293b268d2ccd-internal-tls-certs\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.593141 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdfk8\" 
(UniqueName: \"kubernetes.io/projected/81aa3793-6c68-4ffc-b413-293b268d2ccd-kube-api-access-fdfk8\") pod \"aodh-0\" (UID: \"81aa3793-6c68-4ffc-b413-293b268d2ccd\") " pod="openstack/aodh-0" Dec 10 00:58:25 crc kubenswrapper[4884]: I1210 00:58:25.678470 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Dec 10 00:58:26 crc kubenswrapper[4884]: W1210 00:58:26.153773 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81aa3793_6c68_4ffc_b413_293b268d2ccd.slice/crio-54390debbca9f92207e56b7a2d91f1585a6bf6c97bf37c9f52dad54457aaf550 WatchSource:0}: Error finding container 54390debbca9f92207e56b7a2d91f1585a6bf6c97bf37c9f52dad54457aaf550: Status 404 returned error can't find the container with id 54390debbca9f92207e56b7a2d91f1585a6bf6c97bf37c9f52dad54457aaf550 Dec 10 00:58:26 crc kubenswrapper[4884]: I1210 00:58:26.158273 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Dec 10 00:58:26 crc kubenswrapper[4884]: I1210 00:58:26.287636 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:58:26 crc kubenswrapper[4884]: E1210 00:58:26.287894 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:58:27 crc kubenswrapper[4884]: I1210 00:58:27.024576 4884 generic.go:334] "Generic (PLEG): container finished" podID="7ff4c708-d45a-4f64-aab0-db7234765ea8" containerID="738972214ea3943c4c6a6ede34cb37e87ed21dce7a19d4f2dd9b23f72c2a8981" exitCode=0 Dec 10 00:58:27 crc kubenswrapper[4884]: I1210 00:58:27.024692 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4st4z" event={"ID":"7ff4c708-d45a-4f64-aab0-db7234765ea8","Type":"ContainerDied","Data":"738972214ea3943c4c6a6ede34cb37e87ed21dce7a19d4f2dd9b23f72c2a8981"} Dec 10 00:58:27 crc kubenswrapper[4884]: I1210 00:58:27.027239 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"81aa3793-6c68-4ffc-b413-293b268d2ccd","Type":"ContainerStarted","Data":"54390debbca9f92207e56b7a2d91f1585a6bf6c97bf37c9f52dad54457aaf550"} Dec 10 00:58:27 crc kubenswrapper[4884]: I1210 00:58:27.334926 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42adb60e-d792-453f-a207-41beb4c9fa48" path="/var/lib/kubelet/pods/42adb60e-d792-453f-a207-41beb4c9fa48/volumes" Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.045894 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"81aa3793-6c68-4ffc-b413-293b268d2ccd","Type":"ContainerStarted","Data":"92153ef3a3b22db64d190920fe972116014bcf1b9f81ef7dd0ef2ef79940f153"} Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.046585 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"81aa3793-6c68-4ffc-b413-293b268d2ccd","Type":"ContainerStarted","Data":"04ef4c36273a8df105e28304f2483a0d9b806b803894bae34d8b1738a63a07f4"} Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.566782 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.636510 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-combined-ca-bundle\") pod \"7ff4c708-d45a-4f64-aab0-db7234765ea8\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.636571 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-scripts\") pod \"7ff4c708-d45a-4f64-aab0-db7234765ea8\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.636656 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wr5sr\" (UniqueName: \"kubernetes.io/projected/7ff4c708-d45a-4f64-aab0-db7234765ea8-kube-api-access-wr5sr\") pod \"7ff4c708-d45a-4f64-aab0-db7234765ea8\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.636678 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-config-data\") pod \"7ff4c708-d45a-4f64-aab0-db7234765ea8\" (UID: \"7ff4c708-d45a-4f64-aab0-db7234765ea8\") " Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.639524 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-scripts" (OuterVolumeSpecName: "scripts") pod "7ff4c708-d45a-4f64-aab0-db7234765ea8" (UID: "7ff4c708-d45a-4f64-aab0-db7234765ea8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.642780 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ff4c708-d45a-4f64-aab0-db7234765ea8-kube-api-access-wr5sr" (OuterVolumeSpecName: "kube-api-access-wr5sr") pod "7ff4c708-d45a-4f64-aab0-db7234765ea8" (UID: "7ff4c708-d45a-4f64-aab0-db7234765ea8"). InnerVolumeSpecName "kube-api-access-wr5sr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.668919 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ff4c708-d45a-4f64-aab0-db7234765ea8" (UID: "7ff4c708-d45a-4f64-aab0-db7234765ea8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.671370 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-config-data" (OuterVolumeSpecName: "config-data") pod "7ff4c708-d45a-4f64-aab0-db7234765ea8" (UID: "7ff4c708-d45a-4f64-aab0-db7234765ea8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.738518 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.738553 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.738565 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wr5sr\" (UniqueName: \"kubernetes.io/projected/7ff4c708-d45a-4f64-aab0-db7234765ea8-kube-api-access-wr5sr\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:28 crc kubenswrapper[4884]: I1210 00:58:28.738575 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ff4c708-d45a-4f64-aab0-db7234765ea8-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:29 crc kubenswrapper[4884]: I1210 00:58:29.060921 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-4st4z" event={"ID":"7ff4c708-d45a-4f64-aab0-db7234765ea8","Type":"ContainerDied","Data":"3560105bf831df2a5d33c8d0119f491c7d9b513d6c5054d5002aaeb5b9487750"} Dec 10 00:58:29 crc kubenswrapper[4884]: I1210 00:58:29.061147 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3560105bf831df2a5d33c8d0119f491c7d9b513d6c5054d5002aaeb5b9487750" Dec 10 00:58:29 crc kubenswrapper[4884]: I1210 00:58:29.061204 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-4st4z" Dec 10 00:58:29 crc kubenswrapper[4884]: I1210 00:58:29.063580 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"81aa3793-6c68-4ffc-b413-293b268d2ccd","Type":"ContainerStarted","Data":"d87d690b3246cc377b1b7ca78521d2f421c12512ec2d5e34be2b5eafc3b95247"} Dec 10 00:58:29 crc kubenswrapper[4884]: I1210 00:58:29.285496 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 00:58:29 crc kubenswrapper[4884]: I1210 00:58:29.285767 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="37468774-eb34-4fb0-b207-78ba8d8009b2" containerName="nova-api-log" containerID="cri-o://f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a" gracePeriod=30 Dec 10 00:58:29 crc kubenswrapper[4884]: I1210 00:58:29.286240 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="37468774-eb34-4fb0-b207-78ba8d8009b2" containerName="nova-api-api" containerID="cri-o://bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77" gracePeriod=30 Dec 10 00:58:29 crc kubenswrapper[4884]: I1210 00:58:29.313392 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 00:58:29 crc kubenswrapper[4884]: I1210 00:58:29.313644 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="5664a663-e831-4d21-a144-02dfe91bf470" containerName="nova-scheduler-scheduler" containerID="cri-o://4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8" gracePeriod=30 Dec 10 00:58:29 crc kubenswrapper[4884]: I1210 00:58:29.331632 4884 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:58:29 crc kubenswrapper[4884]: I1210 00:58:29.331908 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="3b352746-53a0-48f5-b381-1f286688c77b" containerName="nova-metadata-log" containerID="cri-o://962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e" gracePeriod=30 Dec 10 00:58:29 crc kubenswrapper[4884]: I1210 00:58:29.332315 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="3b352746-53a0-48f5-b381-1f286688c77b" containerName="nova-metadata-metadata" containerID="cri-o://eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a" gracePeriod=30 Dec 10 00:58:30 crc kubenswrapper[4884]: I1210 00:58:30.077004 4884 generic.go:334] "Generic (PLEG): container finished" podID="3b352746-53a0-48f5-b381-1f286688c77b" containerID="962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e" exitCode=143 Dec 10 00:58:30 crc kubenswrapper[4884]: I1210 00:58:30.077098 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3b352746-53a0-48f5-b381-1f286688c77b","Type":"ContainerDied","Data":"962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e"} Dec 10 00:58:30 crc kubenswrapper[4884]: I1210 00:58:30.079626 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"81aa3793-6c68-4ffc-b413-293b268d2ccd","Type":"ContainerStarted","Data":"a6ee10a5093e738328766887aacad10ce7a9af1097d5279b0b194ba25ad23d91"} Dec 10 00:58:30 crc kubenswrapper[4884]: I1210 00:58:30.081862 4884 generic.go:334] "Generic (PLEG): container finished" podID="37468774-eb34-4fb0-b207-78ba8d8009b2" containerID="f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a" exitCode=143 Dec 10 00:58:30 crc kubenswrapper[4884]: I1210 00:58:30.081924 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"37468774-eb34-4fb0-b207-78ba8d8009b2","Type":"ContainerDied","Data":"f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a"} Dec 10 00:58:30 crc kubenswrapper[4884]: I1210 00:58:30.112304 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.169530973 podStartE2EDuration="5.112273035s" podCreationTimestamp="2025-12-10 00:58:25 +0000 UTC" firstStartedPulling="2025-12-10 00:58:26.158675969 +0000 UTC m=+1679.236633086" lastFinishedPulling="2025-12-10 00:58:29.101417991 +0000 UTC m=+1682.179375148" observedRunningTime="2025-12-10 00:58:30.107035174 +0000 UTC m=+1683.184992291" watchObservedRunningTime="2025-12-10 00:58:30.112273035 +0000 UTC m=+1683.190230152" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.061232 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.072427 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.135715 4884 generic.go:334] "Generic (PLEG): container finished" podID="37468774-eb34-4fb0-b207-78ba8d8009b2" containerID="bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77" exitCode=0 Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.135817 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"37468774-eb34-4fb0-b207-78ba8d8009b2","Type":"ContainerDied","Data":"bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77"} Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.135819 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.135871 4884 scope.go:117] "RemoveContainer" containerID="bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.135855 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"37468774-eb34-4fb0-b207-78ba8d8009b2","Type":"ContainerDied","Data":"1e41d54e9928b8b18ec5eed65f8e430e3db08dff744461d1e0b7e84b39902208"} Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.138871 4884 generic.go:334] "Generic (PLEG): container finished" podID="3b352746-53a0-48f5-b381-1f286688c77b" containerID="eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a" exitCode=0 Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.138900 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3b352746-53a0-48f5-b381-1f286688c77b","Type":"ContainerDied","Data":"eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a"} Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.138917 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"3b352746-53a0-48f5-b381-1f286688c77b","Type":"ContainerDied","Data":"46527aef1b8f0bf6240c2b902ab7e0f390c2972396c8cfdbf5e0e969b0478091"} Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.138969 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.149244 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-config-data\") pod \"3b352746-53a0-48f5-b381-1f286688c77b\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.149289 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flxrt\" (UniqueName: \"kubernetes.io/projected/37468774-eb34-4fb0-b207-78ba8d8009b2-kube-api-access-flxrt\") pod \"37468774-eb34-4fb0-b207-78ba8d8009b2\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.167725 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37468774-eb34-4fb0-b207-78ba8d8009b2-kube-api-access-flxrt" (OuterVolumeSpecName: "kube-api-access-flxrt") pod "37468774-eb34-4fb0-b207-78ba8d8009b2" (UID: "37468774-eb34-4fb0-b207-78ba8d8009b2"). InnerVolumeSpecName "kube-api-access-flxrt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.167840 4884 scope.go:117] "RemoveContainer" containerID="f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.201230 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-config-data" (OuterVolumeSpecName: "config-data") pod "3b352746-53a0-48f5-b381-1f286688c77b" (UID: "3b352746-53a0-48f5-b381-1f286688c77b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.229959 4884 scope.go:117] "RemoveContainer" containerID="bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77" Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.230507 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77\": container with ID starting with bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77 not found: ID does not exist" containerID="bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.230560 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77"} err="failed to get container status \"bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77\": rpc error: code = NotFound desc = could not find container \"bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77\": container with ID starting with bc73931741d1de20370602ba775ea6a2331e7e3dab46adb44220f74dc0c20d77 not found: ID does not exist" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.230589 4884 scope.go:117] "RemoveContainer" containerID="f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a" Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.230900 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a\": container with ID starting with f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a not found: ID does not exist" containerID="f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.230934 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a"} err="failed to get container status \"f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a\": rpc error: code = NotFound desc = could not find container \"f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a\": container with ID starting with f1e73d86326029ce4e6704dd5574b43317532095fa13c3eae75129a39ff8505a not found: ID does not exist" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.230953 4884 scope.go:117] "RemoveContainer" containerID="eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.250568 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-nova-metadata-tls-certs\") pod \"3b352746-53a0-48f5-b381-1f286688c77b\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.250662 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfgl9\" (UniqueName: \"kubernetes.io/projected/3b352746-53a0-48f5-b381-1f286688c77b-kube-api-access-zfgl9\") pod \"3b352746-53a0-48f5-b381-1f286688c77b\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.250694 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-public-tls-certs\") pod \"37468774-eb34-4fb0-b207-78ba8d8009b2\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.250729 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-config-data\") pod \"37468774-eb34-4fb0-b207-78ba8d8009b2\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.250788 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-combined-ca-bundle\") pod \"3b352746-53a0-48f5-b381-1f286688c77b\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.250836 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-combined-ca-bundle\") pod \"37468774-eb34-4fb0-b207-78ba8d8009b2\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.250868 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b352746-53a0-48f5-b381-1f286688c77b-logs\") pod \"3b352746-53a0-48f5-b381-1f286688c77b\" (UID: \"3b352746-53a0-48f5-b381-1f286688c77b\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.250945 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37468774-eb34-4fb0-b207-78ba8d8009b2-logs\") pod \"37468774-eb34-4fb0-b207-78ba8d8009b2\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.251039 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-internal-tls-certs\") pod \"37468774-eb34-4fb0-b207-78ba8d8009b2\" (UID: \"37468774-eb34-4fb0-b207-78ba8d8009b2\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.251657 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b352746-53a0-48f5-b381-1f286688c77b-logs" (OuterVolumeSpecName: "logs") pod "3b352746-53a0-48f5-b381-1f286688c77b" (UID: "3b352746-53a0-48f5-b381-1f286688c77b"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.251677 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.251696 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flxrt\" (UniqueName: \"kubernetes.io/projected/37468774-eb34-4fb0-b207-78ba8d8009b2-kube-api-access-flxrt\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.252690 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37468774-eb34-4fb0-b207-78ba8d8009b2-logs" (OuterVolumeSpecName: "logs") pod "37468774-eb34-4fb0-b207-78ba8d8009b2" (UID: "37468774-eb34-4fb0-b207-78ba8d8009b2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.257950 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b352746-53a0-48f5-b381-1f286688c77b-kube-api-access-zfgl9" (OuterVolumeSpecName: "kube-api-access-zfgl9") pod "3b352746-53a0-48f5-b381-1f286688c77b" (UID: "3b352746-53a0-48f5-b381-1f286688c77b"). InnerVolumeSpecName "kube-api-access-zfgl9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.262509 4884 scope.go:117] "RemoveContainer" containerID="962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.282543 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3b352746-53a0-48f5-b381-1f286688c77b" (UID: "3b352746-53a0-48f5-b381-1f286688c77b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.282560 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "37468774-eb34-4fb0-b207-78ba8d8009b2" (UID: "37468774-eb34-4fb0-b207-78ba8d8009b2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.285705 4884 scope.go:117] "RemoveContainer" containerID="eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a" Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.286052 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a\": container with ID starting with eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a not found: ID does not exist" containerID="eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.286083 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a"} err="failed to get container status \"eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a\": rpc error: code = NotFound desc = could not find container \"eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a\": container with ID starting with eb4236acce60bc5278378cd3d0ee775825c99eb156830afc0f17d89f3da1822a not found: ID does not exist" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.286104 4884 scope.go:117] "RemoveContainer" containerID="962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e" Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.287255 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e\": container with ID starting with 962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e not found: ID does not exist" containerID="962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.287277 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e"} err="failed to get container status \"962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e\": rpc error: code = NotFound desc = could not find container \"962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e\": container with ID starting with 962dc94bee82b65b16faf76b25762cfcdfbe056c72a0640965116b8d16f6e04e not found: ID does not exist" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.293870 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-config-data" (OuterVolumeSpecName: "config-data") pod "37468774-eb34-4fb0-b207-78ba8d8009b2" (UID: "37468774-eb34-4fb0-b207-78ba8d8009b2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.327260 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "3b352746-53a0-48f5-b381-1f286688c77b" (UID: "3b352746-53a0-48f5-b381-1f286688c77b"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.327638 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "37468774-eb34-4fb0-b207-78ba8d8009b2" (UID: "37468774-eb34-4fb0-b207-78ba8d8009b2"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.329891 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "37468774-eb34-4fb0-b207-78ba8d8009b2" (UID: "37468774-eb34-4fb0-b207-78ba8d8009b2"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.356269 4884 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.356311 4884 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.356325 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfgl9\" (UniqueName: \"kubernetes.io/projected/3b352746-53a0-48f5-b381-1f286688c77b-kube-api-access-zfgl9\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.356337 4884 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-public-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.356348 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.356360 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b352746-53a0-48f5-b381-1f286688c77b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.356371 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37468774-eb34-4fb0-b207-78ba8d8009b2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.356382 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b352746-53a0-48f5-b381-1f286688c77b-logs\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.356393 4884 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37468774-eb34-4fb0-b207-78ba8d8009b2-logs\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.465873 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 
00:58:33.483743 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.509022 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.509538 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ff4c708-d45a-4f64-aab0-db7234765ea8" containerName="nova-manage" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.509554 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ff4c708-d45a-4f64-aab0-db7234765ea8" containerName="nova-manage" Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.509573 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37468774-eb34-4fb0-b207-78ba8d8009b2" containerName="nova-api-api" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.509581 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="37468774-eb34-4fb0-b207-78ba8d8009b2" containerName="nova-api-api" Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.509594 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37468774-eb34-4fb0-b207-78ba8d8009b2" containerName="nova-api-log" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.509600 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="37468774-eb34-4fb0-b207-78ba8d8009b2" containerName="nova-api-log" Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.509618 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b352746-53a0-48f5-b381-1f286688c77b" containerName="nova-metadata-log" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.509624 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b352746-53a0-48f5-b381-1f286688c77b" containerName="nova-metadata-log" Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.509634 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b352746-53a0-48f5-b381-1f286688c77b" containerName="nova-metadata-metadata" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.509640 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b352746-53a0-48f5-b381-1f286688c77b" containerName="nova-metadata-metadata" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.509844 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="37468774-eb34-4fb0-b207-78ba8d8009b2" containerName="nova-api-log" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.509864 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="37468774-eb34-4fb0-b207-78ba8d8009b2" containerName="nova-api-api" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.509874 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ff4c708-d45a-4f64-aab0-db7234765ea8" containerName="nova-manage" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.509884 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b352746-53a0-48f5-b381-1f286688c77b" containerName="nova-metadata-metadata" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.509900 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b352746-53a0-48f5-b381-1f286688c77b" containerName="nova-metadata-log" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.511021 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.513425 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.513700 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.517026 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.524837 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.543539 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.555228 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.559339 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33298def-a273-4c2a-bc65-32dc50928a1a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.559452 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33298def-a273-4c2a-bc65-32dc50928a1a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.559491 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33298def-a273-4c2a-bc65-32dc50928a1a-config-data\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.559513 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33298def-a273-4c2a-bc65-32dc50928a1a-logs\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.559540 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6t6sb\" (UniqueName: \"kubernetes.io/projected/33298def-a273-4c2a-bc65-32dc50928a1a-kube-api-access-6t6sb\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.559558 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/33298def-a273-4c2a-bc65-32dc50928a1a-public-tls-certs\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.565654 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.567416 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.570783 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.570980 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.578095 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8 is running failed: container process not found" containerID="4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.578460 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8 is running failed: container process not found" containerID="4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.578712 4884 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8 is running failed: container process not found" containerID="4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 10 00:58:33 crc kubenswrapper[4884]: E1210 00:58:33.578736 4884 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="5664a663-e831-4d21-a144-02dfe91bf470" containerName="nova-scheduler-scheduler" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.587293 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.661734 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33298def-a273-4c2a-bc65-32dc50928a1a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.661786 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/112d4b11-3fd1-4277-b2ec-8b87cae86c10-config-data\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.661812 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/112d4b11-3fd1-4277-b2ec-8b87cae86c10-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 
00:58:33.661836 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33298def-a273-4c2a-bc65-32dc50928a1a-config-data\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.661860 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33298def-a273-4c2a-bc65-32dc50928a1a-logs\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.661889 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6t6sb\" (UniqueName: \"kubernetes.io/projected/33298def-a273-4c2a-bc65-32dc50928a1a-kube-api-access-6t6sb\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.661908 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/33298def-a273-4c2a-bc65-32dc50928a1a-public-tls-certs\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.661956 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/112d4b11-3fd1-4277-b2ec-8b87cae86c10-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.661986 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33298def-a273-4c2a-bc65-32dc50928a1a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.662015 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/112d4b11-3fd1-4277-b2ec-8b87cae86c10-logs\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.662063 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9zxt\" (UniqueName: \"kubernetes.io/projected/112d4b11-3fd1-4277-b2ec-8b87cae86c10-kube-api-access-x9zxt\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.665053 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/33298def-a273-4c2a-bc65-32dc50928a1a-logs\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.666883 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33298def-a273-4c2a-bc65-32dc50928a1a-config-data\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc 
kubenswrapper[4884]: I1210 00:58:33.667166 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33298def-a273-4c2a-bc65-32dc50928a1a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.667527 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/33298def-a273-4c2a-bc65-32dc50928a1a-public-tls-certs\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.668922 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/33298def-a273-4c2a-bc65-32dc50928a1a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.679543 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6t6sb\" (UniqueName: \"kubernetes.io/projected/33298def-a273-4c2a-bc65-32dc50928a1a-kube-api-access-6t6sb\") pod \"nova-api-0\" (UID: \"33298def-a273-4c2a-bc65-32dc50928a1a\") " pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.764654 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9zxt\" (UniqueName: \"kubernetes.io/projected/112d4b11-3fd1-4277-b2ec-8b87cae86c10-kube-api-access-x9zxt\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.765170 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/112d4b11-3fd1-4277-b2ec-8b87cae86c10-config-data\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.765208 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/112d4b11-3fd1-4277-b2ec-8b87cae86c10-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.765351 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/112d4b11-3fd1-4277-b2ec-8b87cae86c10-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.765421 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/112d4b11-3fd1-4277-b2ec-8b87cae86c10-logs\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.765960 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/112d4b11-3fd1-4277-b2ec-8b87cae86c10-logs\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 
00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.769410 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/112d4b11-3fd1-4277-b2ec-8b87cae86c10-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.770114 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/112d4b11-3fd1-4277-b2ec-8b87cae86c10-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.770141 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/112d4b11-3fd1-4277-b2ec-8b87cae86c10-config-data\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.770790 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.793352 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9zxt\" (UniqueName: \"kubernetes.io/projected/112d4b11-3fd1-4277-b2ec-8b87cae86c10-kube-api-access-x9zxt\") pod \"nova-metadata-0\" (UID: \"112d4b11-3fd1-4277-b2ec-8b87cae86c10\") " pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.866727 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbklt\" (UniqueName: \"kubernetes.io/projected/5664a663-e831-4d21-a144-02dfe91bf470-kube-api-access-rbklt\") pod \"5664a663-e831-4d21-a144-02dfe91bf470\" (UID: \"5664a663-e831-4d21-a144-02dfe91bf470\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.866822 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5664a663-e831-4d21-a144-02dfe91bf470-config-data\") pod \"5664a663-e831-4d21-a144-02dfe91bf470\" (UID: \"5664a663-e831-4d21-a144-02dfe91bf470\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.866887 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5664a663-e831-4d21-a144-02dfe91bf470-combined-ca-bundle\") pod \"5664a663-e831-4d21-a144-02dfe91bf470\" (UID: \"5664a663-e831-4d21-a144-02dfe91bf470\") " Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.870617 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5664a663-e831-4d21-a144-02dfe91bf470-kube-api-access-rbklt" (OuterVolumeSpecName: "kube-api-access-rbklt") pod "5664a663-e831-4d21-a144-02dfe91bf470" (UID: "5664a663-e831-4d21-a144-02dfe91bf470"). InnerVolumeSpecName "kube-api-access-rbklt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.895170 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5664a663-e831-4d21-a144-02dfe91bf470-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5664a663-e831-4d21-a144-02dfe91bf470" (UID: "5664a663-e831-4d21-a144-02dfe91bf470"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.910016 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5664a663-e831-4d21-a144-02dfe91bf470-config-data" (OuterVolumeSpecName: "config-data") pod "5664a663-e831-4d21-a144-02dfe91bf470" (UID: "5664a663-e831-4d21-a144-02dfe91bf470"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.916681 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.927856 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.968222 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbklt\" (UniqueName: \"kubernetes.io/projected/5664a663-e831-4d21-a144-02dfe91bf470-kube-api-access-rbklt\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.968256 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5664a663-e831-4d21-a144-02dfe91bf470-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:33 crc kubenswrapper[4884]: I1210 00:58:33.968268 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5664a663-e831-4d21-a144-02dfe91bf470-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.159872 4884 generic.go:334] "Generic (PLEG): container finished" podID="5664a663-e831-4d21-a144-02dfe91bf470" containerID="4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8" exitCode=0 Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.159908 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5664a663-e831-4d21-a144-02dfe91bf470","Type":"ContainerDied","Data":"4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8"} Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.159931 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"5664a663-e831-4d21-a144-02dfe91bf470","Type":"ContainerDied","Data":"a599d23b50de8d33b26aa127c04d335835c2ec8bb2abb655459ca8c4a3bb5ecd"} Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.159948 4884 scope.go:117] "RemoveContainer" containerID="4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.160039 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.231556 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.256496 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.280525 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 00:58:34 crc kubenswrapper[4884]: E1210 00:58:34.281004 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5664a663-e831-4d21-a144-02dfe91bf470" containerName="nova-scheduler-scheduler" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.281017 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5664a663-e831-4d21-a144-02dfe91bf470" containerName="nova-scheduler-scheduler" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.281237 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5664a663-e831-4d21-a144-02dfe91bf470" containerName="nova-scheduler-scheduler" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.281987 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.286715 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.290231 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.298067 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvm5m\" (UniqueName: \"kubernetes.io/projected/319b944f-ffe5-4b53-90c3-412bf9c8c818-kube-api-access-pvm5m\") pod \"nova-scheduler-0\" (UID: \"319b944f-ffe5-4b53-90c3-412bf9c8c818\") " pod="openstack/nova-scheduler-0" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.301596 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/319b944f-ffe5-4b53-90c3-412bf9c8c818-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"319b944f-ffe5-4b53-90c3-412bf9c8c818\") " pod="openstack/nova-scheduler-0" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.302170 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/319b944f-ffe5-4b53-90c3-412bf9c8c818-config-data\") pod \"nova-scheduler-0\" (UID: \"319b944f-ffe5-4b53-90c3-412bf9c8c818\") " pod="openstack/nova-scheduler-0" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.304563 4884 scope.go:117] "RemoveContainer" containerID="4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8" Dec 10 00:58:34 crc kubenswrapper[4884]: E1210 00:58:34.307532 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8\": container with ID starting with 4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8 not found: ID does not exist" containerID="4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.307633 4884 pod_container_deletor.go:53] "DeleteContainer 
returned error" containerID={"Type":"cri-o","ID":"4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8"} err="failed to get container status \"4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8\": rpc error: code = NotFound desc = could not find container \"4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8\": container with ID starting with 4831e1b4aef4245aa210668b11c7f1e72203c03ee9af772efd50be0b78167ff8 not found: ID does not exist" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.404609 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/319b944f-ffe5-4b53-90c3-412bf9c8c818-config-data\") pod \"nova-scheduler-0\" (UID: \"319b944f-ffe5-4b53-90c3-412bf9c8c818\") " pod="openstack/nova-scheduler-0" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.404883 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvm5m\" (UniqueName: \"kubernetes.io/projected/319b944f-ffe5-4b53-90c3-412bf9c8c818-kube-api-access-pvm5m\") pod \"nova-scheduler-0\" (UID: \"319b944f-ffe5-4b53-90c3-412bf9c8c818\") " pod="openstack/nova-scheduler-0" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.404926 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/319b944f-ffe5-4b53-90c3-412bf9c8c818-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"319b944f-ffe5-4b53-90c3-412bf9c8c818\") " pod="openstack/nova-scheduler-0" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.408833 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/319b944f-ffe5-4b53-90c3-412bf9c8c818-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"319b944f-ffe5-4b53-90c3-412bf9c8c818\") " pod="openstack/nova-scheduler-0" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.409941 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/319b944f-ffe5-4b53-90c3-412bf9c8c818-config-data\") pod \"nova-scheduler-0\" (UID: \"319b944f-ffe5-4b53-90c3-412bf9c8c818\") " pod="openstack/nova-scheduler-0" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.427242 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvm5m\" (UniqueName: \"kubernetes.io/projected/319b944f-ffe5-4b53-90c3-412bf9c8c818-kube-api-access-pvm5m\") pod \"nova-scheduler-0\" (UID: \"319b944f-ffe5-4b53-90c3-412bf9c8c818\") " pod="openstack/nova-scheduler-0" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.496272 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 10 00:58:34 crc kubenswrapper[4884]: W1210 00:58:34.509307 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33298def_a273_4c2a_bc65_32dc50928a1a.slice/crio-99b0e5a0500abf3021c7f7d4e5718bfbbd0bb63ff52ba336a246cfe216f0aa09 WatchSource:0}: Error finding container 99b0e5a0500abf3021c7f7d4e5718bfbbd0bb63ff52ba336a246cfe216f0aa09: Status 404 returned error can't find the container with id 99b0e5a0500abf3021c7f7d4e5718bfbbd0bb63ff52ba336a246cfe216f0aa09 Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.643843 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 10 00:58:34 crc kubenswrapper[4884]: I1210 00:58:34.655633 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.136877 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.184012 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"112d4b11-3fd1-4277-b2ec-8b87cae86c10","Type":"ContainerStarted","Data":"200887d7d15e799dfc9e75627f237ae6d74d68349c197acdaeb84c3961505241"} Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.184060 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"112d4b11-3fd1-4277-b2ec-8b87cae86c10","Type":"ContainerStarted","Data":"26807bca1be877bb2e0068e30b4e55ef66516e470dc2a2bd863d8fd25222ff17"} Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.184069 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"112d4b11-3fd1-4277-b2ec-8b87cae86c10","Type":"ContainerStarted","Data":"f4aa02db87a7a6fcc29e0bf4addc4ccf8ccd611a1bc19c4a6d7326d2e2328082"} Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.187486 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"319b944f-ffe5-4b53-90c3-412bf9c8c818","Type":"ContainerStarted","Data":"43488a2e4a4e86a37e361daac89abb141a9f615a961732fd723e220e2a8a5a9b"} Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.190470 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"33298def-a273-4c2a-bc65-32dc50928a1a","Type":"ContainerStarted","Data":"37381ddd78b71de53cf8deb36053533b391c61b87f59e17241c6e280b0c26b5c"} Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.190502 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"33298def-a273-4c2a-bc65-32dc50928a1a","Type":"ContainerStarted","Data":"23e0091225adee3899744969146fe77bdd060ea5064fe2e5a1e081742b811129"} Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.190517 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"33298def-a273-4c2a-bc65-32dc50928a1a","Type":"ContainerStarted","Data":"99b0e5a0500abf3021c7f7d4e5718bfbbd0bb63ff52ba336a246cfe216f0aa09"} Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.213965 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.213942782 podStartE2EDuration="2.213942782s" podCreationTimestamp="2025-12-10 00:58:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:58:35.206106088 +0000 UTC m=+1688.284063215" watchObservedRunningTime="2025-12-10 00:58:35.213942782 +0000 UTC m=+1688.291899899" Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.235154 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.235130578 podStartE2EDuration="2.235130578s" podCreationTimestamp="2025-12-10 00:58:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:58:35.229473304 +0000 UTC m=+1688.307430431" watchObservedRunningTime="2025-12-10 00:58:35.235130578 +0000 UTC 
m=+1688.313087705" Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.302568 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37468774-eb34-4fb0-b207-78ba8d8009b2" path="/var/lib/kubelet/pods/37468774-eb34-4fb0-b207-78ba8d8009b2/volumes" Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.303271 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b352746-53a0-48f5-b381-1f286688c77b" path="/var/lib/kubelet/pods/3b352746-53a0-48f5-b381-1f286688c77b/volumes" Dec 10 00:58:35 crc kubenswrapper[4884]: I1210 00:58:35.306309 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5664a663-e831-4d21-a144-02dfe91bf470" path="/var/lib/kubelet/pods/5664a663-e831-4d21-a144-02dfe91bf470/volumes" Dec 10 00:58:36 crc kubenswrapper[4884]: I1210 00:58:36.206732 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"319b944f-ffe5-4b53-90c3-412bf9c8c818","Type":"ContainerStarted","Data":"ce9d4cd01e805b6153b1934b21da02891a8f288a6d4dae6279cec3b6e8f22a80"} Dec 10 00:58:36 crc kubenswrapper[4884]: I1210 00:58:36.238424 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.23839655 podStartE2EDuration="2.23839655s" podCreationTimestamp="2025-12-10 00:58:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:58:36.228160072 +0000 UTC m=+1689.306117259" watchObservedRunningTime="2025-12-10 00:58:36.23839655 +0000 UTC m=+1689.316353707" Dec 10 00:58:38 crc kubenswrapper[4884]: I1210 00:58:38.928326 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 00:58:38 crc kubenswrapper[4884]: I1210 00:58:38.928649 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 10 00:58:39 crc kubenswrapper[4884]: I1210 00:58:39.644602 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 10 00:58:40 crc kubenswrapper[4884]: I1210 00:58:40.235828 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 10 00:58:41 crc kubenswrapper[4884]: I1210 00:58:41.288169 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:58:41 crc kubenswrapper[4884]: E1210 00:58:41.289103 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:58:43 crc kubenswrapper[4884]: I1210 00:58:43.917978 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 00:58:43 crc kubenswrapper[4884]: I1210 00:58:43.918049 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 10 00:58:43 crc kubenswrapper[4884]: I1210 00:58:43.928167 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 10 00:58:43 crc kubenswrapper[4884]: I1210 00:58:43.928273 4884 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 10 00:58:44 crc kubenswrapper[4884]: I1210 00:58:44.645322 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 10 00:58:44 crc kubenswrapper[4884]: I1210 00:58:44.687173 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 10 00:58:44 crc kubenswrapper[4884]: I1210 00:58:44.931668 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="33298def-a273-4c2a-bc65-32dc50928a1a" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.243:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 00:58:44 crc kubenswrapper[4884]: I1210 00:58:44.931696 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="33298def-a273-4c2a-bc65-32dc50928a1a" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.243:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 00:58:44 crc kubenswrapper[4884]: I1210 00:58:44.942603 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="112d4b11-3fd1-4277-b2ec-8b87cae86c10" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.244:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 00:58:44 crc kubenswrapper[4884]: I1210 00:58:44.942591 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="112d4b11-3fd1-4277-b2ec-8b87cae86c10" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.244:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 10 00:58:45 crc kubenswrapper[4884]: I1210 00:58:45.369759 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 10 00:58:53 crc kubenswrapper[4884]: I1210 00:58:53.923943 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 10 00:58:53 crc kubenswrapper[4884]: I1210 00:58:53.924926 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 10 00:58:53 crc kubenswrapper[4884]: I1210 00:58:53.925826 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 10 00:58:53 crc kubenswrapper[4884]: I1210 00:58:53.932220 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 10 00:58:53 crc kubenswrapper[4884]: I1210 00:58:53.934159 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 10 00:58:53 crc kubenswrapper[4884]: I1210 00:58:53.942335 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 10 00:58:53 crc kubenswrapper[4884]: I1210 00:58:53.942509 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 10 00:58:54 crc kubenswrapper[4884]: I1210 00:58:54.287944 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:58:54 crc kubenswrapper[4884]: E1210 00:58:54.288276 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:58:54 crc kubenswrapper[4884]: I1210 00:58:54.452753 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 10 00:58:54 crc kubenswrapper[4884]: I1210 00:58:54.459582 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 10 00:58:54 crc kubenswrapper[4884]: I1210 00:58:54.467045 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 10 00:59:04 crc kubenswrapper[4884]: I1210 00:59:04.774942 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-slxfj"] Dec 10 00:59:04 crc kubenswrapper[4884]: I1210 00:59:04.788466 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-slxfj"] Dec 10 00:59:04 crc kubenswrapper[4884]: I1210 00:59:04.884658 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-vf4dp"] Dec 10 00:59:04 crc kubenswrapper[4884]: I1210 00:59:04.886952 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-vf4dp" Dec 10 00:59:04 crc kubenswrapper[4884]: I1210 00:59:04.902168 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-vf4dp"] Dec 10 00:59:04 crc kubenswrapper[4884]: I1210 00:59:04.994110 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/538f59ed-0b58-4b8c-8912-4ebe5ff073f7-combined-ca-bundle\") pod \"heat-db-sync-vf4dp\" (UID: \"538f59ed-0b58-4b8c-8912-4ebe5ff073f7\") " pod="openstack/heat-db-sync-vf4dp" Dec 10 00:59:04 crc kubenswrapper[4884]: I1210 00:59:04.994184 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/538f59ed-0b58-4b8c-8912-4ebe5ff073f7-config-data\") pod \"heat-db-sync-vf4dp\" (UID: \"538f59ed-0b58-4b8c-8912-4ebe5ff073f7\") " pod="openstack/heat-db-sync-vf4dp" Dec 10 00:59:04 crc kubenswrapper[4884]: I1210 00:59:04.994416 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hf5zm\" (UniqueName: \"kubernetes.io/projected/538f59ed-0b58-4b8c-8912-4ebe5ff073f7-kube-api-access-hf5zm\") pod \"heat-db-sync-vf4dp\" (UID: \"538f59ed-0b58-4b8c-8912-4ebe5ff073f7\") " pod="openstack/heat-db-sync-vf4dp" Dec 10 00:59:05 crc kubenswrapper[4884]: I1210 00:59:05.097162 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/538f59ed-0b58-4b8c-8912-4ebe5ff073f7-combined-ca-bundle\") pod \"heat-db-sync-vf4dp\" (UID: \"538f59ed-0b58-4b8c-8912-4ebe5ff073f7\") " pod="openstack/heat-db-sync-vf4dp" Dec 10 00:59:05 crc kubenswrapper[4884]: I1210 00:59:05.097232 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/538f59ed-0b58-4b8c-8912-4ebe5ff073f7-config-data\") pod \"heat-db-sync-vf4dp\" (UID: \"538f59ed-0b58-4b8c-8912-4ebe5ff073f7\") " pod="openstack/heat-db-sync-vf4dp" Dec 10 
00:59:05 crc kubenswrapper[4884]: I1210 00:59:05.097275 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hf5zm\" (UniqueName: \"kubernetes.io/projected/538f59ed-0b58-4b8c-8912-4ebe5ff073f7-kube-api-access-hf5zm\") pod \"heat-db-sync-vf4dp\" (UID: \"538f59ed-0b58-4b8c-8912-4ebe5ff073f7\") " pod="openstack/heat-db-sync-vf4dp" Dec 10 00:59:05 crc kubenswrapper[4884]: I1210 00:59:05.103868 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/538f59ed-0b58-4b8c-8912-4ebe5ff073f7-config-data\") pod \"heat-db-sync-vf4dp\" (UID: \"538f59ed-0b58-4b8c-8912-4ebe5ff073f7\") " pod="openstack/heat-db-sync-vf4dp" Dec 10 00:59:05 crc kubenswrapper[4884]: I1210 00:59:05.103917 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/538f59ed-0b58-4b8c-8912-4ebe5ff073f7-combined-ca-bundle\") pod \"heat-db-sync-vf4dp\" (UID: \"538f59ed-0b58-4b8c-8912-4ebe5ff073f7\") " pod="openstack/heat-db-sync-vf4dp" Dec 10 00:59:05 crc kubenswrapper[4884]: I1210 00:59:05.114208 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hf5zm\" (UniqueName: \"kubernetes.io/projected/538f59ed-0b58-4b8c-8912-4ebe5ff073f7-kube-api-access-hf5zm\") pod \"heat-db-sync-vf4dp\" (UID: \"538f59ed-0b58-4b8c-8912-4ebe5ff073f7\") " pod="openstack/heat-db-sync-vf4dp" Dec 10 00:59:05 crc kubenswrapper[4884]: I1210 00:59:05.253773 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-vf4dp" Dec 10 00:59:05 crc kubenswrapper[4884]: I1210 00:59:05.302626 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="175b7d23-6c78-4a15-9f04-f40b83d3a932" path="/var/lib/kubelet/pods/175b7d23-6c78-4a15-9f04-f40b83d3a932/volumes" Dec 10 00:59:05 crc kubenswrapper[4884]: I1210 00:59:05.750089 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-vf4dp"] Dec 10 00:59:05 crc kubenswrapper[4884]: E1210 00:59:05.908778 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 00:59:05 crc kubenswrapper[4884]: E1210 00:59:05.909112 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 00:59:05 crc kubenswrapper[4884]: E1210 00:59:05.909253 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 00:59:05 crc kubenswrapper[4884]: E1210 00:59:05.911075 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 00:59:06 crc kubenswrapper[4884]: I1210 00:59:06.653293 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-vf4dp" event={"ID":"538f59ed-0b58-4b8c-8912-4ebe5ff073f7","Type":"ContainerStarted","Data":"19a35b5fab89654277d73f7bd53b2b7bf0f2ca1d8373fd6f78df6cd1766ee6be"} Dec 10 00:59:06 crc kubenswrapper[4884]: E1210 00:59:06.656152 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 00:59:06 crc kubenswrapper[4884]: I1210 00:59:06.891267 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:59:06 crc kubenswrapper[4884]: I1210 00:59:06.891673 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="ceilometer-central-agent" containerID="cri-o://e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0" gracePeriod=30 Dec 10 00:59:06 crc kubenswrapper[4884]: I1210 00:59:06.891737 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="proxy-httpd" containerID="cri-o://cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567" gracePeriod=30 Dec 10 00:59:06 crc kubenswrapper[4884]: I1210 00:59:06.892007 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="sg-core" containerID="cri-o://3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d" gracePeriod=30 Dec 10 00:59:06 crc kubenswrapper[4884]: I1210 00:59:06.892046 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="ceilometer-notification-agent" containerID="cri-o://46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec" gracePeriod=30 Dec 10 00:59:06 crc kubenswrapper[4884]: I1210 00:59:06.975928 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 00:59:07 crc kubenswrapper[4884]: I1210 00:59:07.341566 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:59:07 crc kubenswrapper[4884]: E1210 00:59:07.341805 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:59:07 crc kubenswrapper[4884]: I1210 00:59:07.702830 4884 generic.go:334] "Generic (PLEG): container finished" podID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerID="cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567" exitCode=0 Dec 10 00:59:07 crc kubenswrapper[4884]: I1210 00:59:07.703221 4884 generic.go:334] "Generic (PLEG): 
container finished" podID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerID="3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d" exitCode=2 Dec 10 00:59:07 crc kubenswrapper[4884]: I1210 00:59:07.703282 4884 generic.go:334] "Generic (PLEG): container finished" podID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerID="e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0" exitCode=0 Dec 10 00:59:07 crc kubenswrapper[4884]: I1210 00:59:07.702896 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6829ec3-2b83-4cf5-a39a-b48697dac2b9","Type":"ContainerDied","Data":"cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567"} Dec 10 00:59:07 crc kubenswrapper[4884]: I1210 00:59:07.703396 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6829ec3-2b83-4cf5-a39a-b48697dac2b9","Type":"ContainerDied","Data":"3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d"} Dec 10 00:59:07 crc kubenswrapper[4884]: I1210 00:59:07.703409 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6829ec3-2b83-4cf5-a39a-b48697dac2b9","Type":"ContainerDied","Data":"e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0"} Dec 10 00:59:07 crc kubenswrapper[4884]: E1210 00:59:07.705051 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 00:59:08 crc kubenswrapper[4884]: I1210 00:59:08.026003 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 00:59:10 crc kubenswrapper[4884]: I1210 00:59:10.262614 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.239:3000/\": dial tcp 10.217.0.239:3000: connect: connection refused" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.446397 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="7343006c-fda9-4e2d-8767-41ee4412c601" containerName="rabbitmq" containerID="cri-o://78981e56c620a6650251f282dc2bc53d7cb907802cb58320c3a88eefc671b3cd" gracePeriod=604796 Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.567417 4884 util.go:48] "No ready sandbox for pod can be found. 
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.567417 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.734457 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-combined-ca-bundle\") pod \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") "
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.734518 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-sg-core-conf-yaml\") pod \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") "
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.734584 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2l5dg\" (UniqueName: \"kubernetes.io/projected/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-kube-api-access-2l5dg\") pod \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") "
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.734622 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-log-httpd\") pod \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") "
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.734663 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-config-data\") pod \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") "
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.734746 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-run-httpd\") pod \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") "
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.734808 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-scripts\") pod \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") "
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.734872 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-ceilometer-tls-certs\") pod \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\" (UID: \"e6829ec3-2b83-4cf5-a39a-b48697dac2b9\") "
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.735566 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e6829ec3-2b83-4cf5-a39a-b48697dac2b9" (UID: "e6829ec3-2b83-4cf5-a39a-b48697dac2b9"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.736415 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e6829ec3-2b83-4cf5-a39a-b48697dac2b9" (UID: "e6829ec3-2b83-4cf5-a39a-b48697dac2b9"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.742834 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-kube-api-access-2l5dg" (OuterVolumeSpecName: "kube-api-access-2l5dg") pod "e6829ec3-2b83-4cf5-a39a-b48697dac2b9" (UID: "e6829ec3-2b83-4cf5-a39a-b48697dac2b9"). InnerVolumeSpecName "kube-api-access-2l5dg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.746096 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-scripts" (OuterVolumeSpecName: "scripts") pod "e6829ec3-2b83-4cf5-a39a-b48697dac2b9" (UID: "e6829ec3-2b83-4cf5-a39a-b48697dac2b9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.767242 4884 generic.go:334] "Generic (PLEG): container finished" podID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerID="46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec" exitCode=0
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.767283 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6829ec3-2b83-4cf5-a39a-b48697dac2b9","Type":"ContainerDied","Data":"46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec"}
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.767309 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e6829ec3-2b83-4cf5-a39a-b48697dac2b9","Type":"ContainerDied","Data":"486ea03e46a26a4b9e6bd31696977fb533cabc9f7b118678b9d847457d84c302"}
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.767326 4884 scope.go:117] "RemoveContainer" containerID="cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567"
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.767475 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.774073 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e6829ec3-2b83-4cf5-a39a-b48697dac2b9" (UID: "e6829ec3-2b83-4cf5-a39a-b48697dac2b9"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.837595 4884 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.837620 4884 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-scripts\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.837633 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.837644 4884 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.837668 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2l5dg\" (UniqueName: \"kubernetes.io/projected/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-kube-api-access-2l5dg\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.837676 4884 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.856595 4884 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="7343006c-fda9-4e2d-8767-41ee4412c601" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.120:5671: connect: connection refused" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.860043 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e6829ec3-2b83-4cf5-a39a-b48697dac2b9" (UID: "e6829ec3-2b83-4cf5-a39a-b48697dac2b9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.863343 4884 scope.go:117] "RemoveContainer" containerID="3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.869752 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-config-data" (OuterVolumeSpecName: "config-data") pod "e6829ec3-2b83-4cf5-a39a-b48697dac2b9" (UID: "e6829ec3-2b83-4cf5-a39a-b48697dac2b9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.885501 4884 scope.go:117] "RemoveContainer" containerID="46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.906580 4884 scope.go:117] "RemoveContainer" containerID="e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.939578 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.939620 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e6829ec3-2b83-4cf5-a39a-b48697dac2b9-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.944002 4884 scope.go:117] "RemoveContainer" containerID="cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567" Dec 10 00:59:11 crc kubenswrapper[4884]: E1210 00:59:11.944580 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567\": container with ID starting with cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567 not found: ID does not exist" containerID="cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.944616 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567"} err="failed to get container status \"cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567\": rpc error: code = NotFound desc = could not find container \"cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567\": container with ID starting with cdd94c9c987ca1d9ccabe602bfea1cd0b9b10daa4555540fd6b4ee1fc93aa567 not found: ID does not exist" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.944640 4884 scope.go:117] "RemoveContainer" containerID="3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d" Dec 10 00:59:11 crc kubenswrapper[4884]: E1210 00:59:11.948658 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d\": container with ID starting with 3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d not found: ID does not exist" containerID="3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.948703 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d"} err="failed to get container status \"3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d\": rpc error: code = NotFound desc = could not find container \"3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d\": container with ID starting with 3b6d0bf1676b2585667b38582fec223eb19f6287506db4e5ef93a51823c61f1d not found: ID does not exist" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.948732 4884 scope.go:117] "RemoveContainer" 
containerID="46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec" Dec 10 00:59:11 crc kubenswrapper[4884]: E1210 00:59:11.949119 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec\": container with ID starting with 46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec not found: ID does not exist" containerID="46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.949146 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec"} err="failed to get container status \"46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec\": rpc error: code = NotFound desc = could not find container \"46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec\": container with ID starting with 46436a07dc5321d4eb2097475d607c7edf3f2e299c71ad4884da12dd29b3f9ec not found: ID does not exist" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.949161 4884 scope.go:117] "RemoveContainer" containerID="e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0" Dec 10 00:59:11 crc kubenswrapper[4884]: E1210 00:59:11.949472 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0\": container with ID starting with e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0 not found: ID does not exist" containerID="e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0" Dec 10 00:59:11 crc kubenswrapper[4884]: I1210 00:59:11.949492 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0"} err="failed to get container status \"e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0\": rpc error: code = NotFound desc = could not find container \"e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0\": container with ID starting with e6b6fac758acf50db12689adaa1f088ce4976a9105240c3bb369bca3299e6fe0 not found: ID does not exist" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.109641 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.121907 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.140317 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:59:12 crc kubenswrapper[4884]: E1210 00:59:12.140851 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="sg-core" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.140873 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="sg-core" Dec 10 00:59:12 crc kubenswrapper[4884]: E1210 00:59:12.140893 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="ceilometer-central-agent" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.140902 4884 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="ceilometer-central-agent" Dec 10 00:59:12 crc kubenswrapper[4884]: E1210 00:59:12.140914 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="proxy-httpd" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.140923 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="proxy-httpd" Dec 10 00:59:12 crc kubenswrapper[4884]: E1210 00:59:12.140952 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="ceilometer-notification-agent" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.140961 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="ceilometer-notification-agent" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.141271 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="ceilometer-central-agent" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.141308 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="proxy-httpd" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.141334 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="ceilometer-notification-agent" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.141357 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" containerName="sg-core" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.146268 4884 util.go:30] "No sandbox for pod can be found. 
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.146268 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.157951 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.157960 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.158409 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.158598 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.244463 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.244518 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5p9z\" (UniqueName: \"kubernetes.io/projected/3ca9531f-74d6-4baa-aca5-f734f006210b-kube-api-access-g5p9z\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.244613 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ca9531f-74d6-4baa-aca5-f734f006210b-log-httpd\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.244635 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-scripts\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.244688 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-config-data\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.244742 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ca9531f-74d6-4baa-aca5-f734f006210b-run-httpd\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.244761 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.244804 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.347254 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ca9531f-74d6-4baa-aca5-f734f006210b-run-httpd\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.347679 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.348586 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.348641 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.348690 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5p9z\" (UniqueName: \"kubernetes.io/projected/3ca9531f-74d6-4baa-aca5-f734f006210b-kube-api-access-g5p9z\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.348146 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ca9531f-74d6-4baa-aca5-f734f006210b-run-httpd\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.348900 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ca9531f-74d6-4baa-aca5-f734f006210b-log-httpd\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.348953 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-scripts\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.349022 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-config-data\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0"
\"kubernetes.io/empty-dir/3ca9531f-74d6-4baa-aca5-f734f006210b-log-httpd\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.353955 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-config-data\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.360190 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.360262 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.360411 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.360454 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3ca9531f-74d6-4baa-aca5-f734f006210b-scripts\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.384575 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5p9z\" (UniqueName: \"kubernetes.io/projected/3ca9531f-74d6-4baa-aca5-f734f006210b-kube-api-access-g5p9z\") pod \"ceilometer-0\" (UID: \"3ca9531f-74d6-4baa-aca5-f734f006210b\") " pod="openstack/ceilometer-0" Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.506955 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" containerName="rabbitmq" containerID="cri-o://80aff531440e5df8fdf3ed19c2426ac5174da004004613a3abf3fa9c7b5e565d" gracePeriod=604796 Dec 10 00:59:12 crc kubenswrapper[4884]: I1210 00:59:12.517931 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 10 00:59:13 crc kubenswrapper[4884]: I1210 00:59:13.020360 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 10 00:59:13 crc kubenswrapper[4884]: E1210 00:59:13.155308 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 00:59:13 crc kubenswrapper[4884]: E1210 00:59:13.155389 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 00:59:13 crc kubenswrapper[4884]: E1210 00:59:13.155619 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
Dec 10 00:59:13 crc kubenswrapper[4884]: E1210 00:59:13.155619 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError"
Dec 10 00:59:13 crc kubenswrapper[4884]: I1210 00:59:13.304336 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6829ec3-2b83-4cf5-a39a-b48697dac2b9" path="/var/lib/kubelet/pods/e6829ec3-2b83-4cf5-a39a-b48697dac2b9/volumes"
Dec 10 00:59:13 crc kubenswrapper[4884]: I1210 00:59:13.796899 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ca9531f-74d6-4baa-aca5-f734f006210b","Type":"ContainerStarted","Data":"42f9a4f6eb48ea1dea51b446ded784b3a2ddaa57506a4a4ce06299e2aed44803"}
Dec 10 00:59:14 crc kubenswrapper[4884]: I1210 00:59:14.814517 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ca9531f-74d6-4baa-aca5-f734f006210b","Type":"ContainerStarted","Data":"f2f30d0ceb9fa8dab93a6a05352782c6ba7275e225fc53e8a13dd21a0bea6d83"}
Dec 10 00:59:15 crc kubenswrapper[4884]: I1210 00:59:15.830520 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ca9531f-74d6-4baa-aca5-f734f006210b","Type":"ContainerStarted","Data":"7bb36e933311d2d609cf362bd781f70cf8ae2f678fbb802fe4531306baedf02f"}
Dec 10 00:59:16 crc kubenswrapper[4884]: E1210 00:59:16.822885 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 00:59:16 crc kubenswrapper[4884]: I1210 00:59:16.841988 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3ca9531f-74d6-4baa-aca5-f734f006210b","Type":"ContainerStarted","Data":"96ec6abb7c433afaaf8b7782f03765736ede31d30663cf947b3eece01405a27d"}
Dec 10 00:59:16 crc kubenswrapper[4884]: I1210 00:59:16.842242 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Dec 10 00:59:16 crc kubenswrapper[4884]: E1210 00:59:16.844201 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 00:59:17 crc kubenswrapper[4884]: I1210 00:59:17.852816 4884 generic.go:334] "Generic (PLEG): container finished" podID="7343006c-fda9-4e2d-8767-41ee4412c601" containerID="78981e56c620a6650251f282dc2bc53d7cb907802cb58320c3a88eefc671b3cd" exitCode=0
Dec 10 00:59:17 crc kubenswrapper[4884]: I1210 00:59:17.852889 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7343006c-fda9-4e2d-8767-41ee4412c601","Type":"ContainerDied","Data":"78981e56c620a6650251f282dc2bc53d7cb907802cb58320c3a88eefc671b3cd"}
Dec 10 00:59:17 crc kubenswrapper[4884]: E1210 00:59:17.855718 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.062756 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.094325 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7343006c-fda9-4e2d-8767-41ee4412c601-erlang-cookie-secret\") pod \"7343006c-fda9-4e2d-8767-41ee4412c601\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") "
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.094731 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-plugins\") pod \"7343006c-fda9-4e2d-8767-41ee4412c601\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") "
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.094899 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-plugins-conf\") pod \"7343006c-fda9-4e2d-8767-41ee4412c601\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") "
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.095280 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-erlang-cookie\") pod \"7343006c-fda9-4e2d-8767-41ee4412c601\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") "
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.095563 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-server-conf\") pod \"7343006c-fda9-4e2d-8767-41ee4412c601\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") "
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.095557 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "7343006c-fda9-4e2d-8767-41ee4412c601" (UID: "7343006c-fda9-4e2d-8767-41ee4412c601"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.096093 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-tls\") pod \"7343006c-fda9-4e2d-8767-41ee4412c601\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.096130 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "7343006c-fda9-4e2d-8767-41ee4412c601" (UID: "7343006c-fda9-4e2d-8767-41ee4412c601"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.096585 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-config-data\") pod \"7343006c-fda9-4e2d-8767-41ee4412c601\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.097264 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28w4d\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-kube-api-access-28w4d\") pod \"7343006c-fda9-4e2d-8767-41ee4412c601\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.097871 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"7343006c-fda9-4e2d-8767-41ee4412c601\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.098107 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-confd\") pod \"7343006c-fda9-4e2d-8767-41ee4412c601\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.098288 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7343006c-fda9-4e2d-8767-41ee4412c601-pod-info\") pod \"7343006c-fda9-4e2d-8767-41ee4412c601\" (UID: \"7343006c-fda9-4e2d-8767-41ee4412c601\") " Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.099131 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.099273 4884 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.099464 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.104998 4884 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "7343006c-fda9-4e2d-8767-41ee4412c601" (UID: "7343006c-fda9-4e2d-8767-41ee4412c601"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.107575 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-kube-api-access-28w4d" (OuterVolumeSpecName: "kube-api-access-28w4d") pod "7343006c-fda9-4e2d-8767-41ee4412c601" (UID: "7343006c-fda9-4e2d-8767-41ee4412c601"). InnerVolumeSpecName "kube-api-access-28w4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.109898 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "persistence") pod "7343006c-fda9-4e2d-8767-41ee4412c601" (UID: "7343006c-fda9-4e2d-8767-41ee4412c601"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.114055 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7343006c-fda9-4e2d-8767-41ee4412c601-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "7343006c-fda9-4e2d-8767-41ee4412c601" (UID: "7343006c-fda9-4e2d-8767-41ee4412c601"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.124114 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/7343006c-fda9-4e2d-8767-41ee4412c601-pod-info" (OuterVolumeSpecName: "pod-info") pod "7343006c-fda9-4e2d-8767-41ee4412c601" (UID: "7343006c-fda9-4e2d-8767-41ee4412c601"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.176461 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-config-data" (OuterVolumeSpecName: "config-data") pod "7343006c-fda9-4e2d-8767-41ee4412c601" (UID: "7343006c-fda9-4e2d-8767-41ee4412c601"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.203834 4884 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7343006c-fda9-4e2d-8767-41ee4412c601-pod-info\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.204037 4884 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7343006c-fda9-4e2d-8767-41ee4412c601-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.204116 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.204175 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.204244 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28w4d\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-kube-api-access-28w4d\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.204315 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.222264 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-server-conf" (OuterVolumeSpecName: "server-conf") pod "7343006c-fda9-4e2d-8767-41ee4412c601" (UID: "7343006c-fda9-4e2d-8767-41ee4412c601"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.284412 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.308091 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.310679 4884 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7343006c-fda9-4e2d-8767-41ee4412c601-server-conf\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.312052 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "7343006c-fda9-4e2d-8767-41ee4412c601" (UID: "7343006c-fda9-4e2d-8767-41ee4412c601"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.413640 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7343006c-fda9-4e2d-8767-41ee4412c601-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.867168 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7343006c-fda9-4e2d-8767-41ee4412c601","Type":"ContainerDied","Data":"281e970d556f566817222c7905ca9af5b6793b162dc9db9fef8551df498d2d38"} Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.867412 4884 scope.go:117] "RemoveContainer" containerID="78981e56c620a6650251f282dc2bc53d7cb907802cb58320c3a88eefc671b3cd" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.867550 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.896807 4884 generic.go:334] "Generic (PLEG): container finished" podID="6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" containerID="80aff531440e5df8fdf3ed19c2426ac5174da004004613a3abf3fa9c7b5e565d" exitCode=0 Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.896849 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93","Type":"ContainerDied","Data":"80aff531440e5df8fdf3ed19c2426ac5174da004004613a3abf3fa9c7b5e565d"} Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.926532 4884 scope.go:117] "RemoveContainer" containerID="8e10a63d7c4265ae6e3042c0e1fd366e4fec44c8460e8ef66d0de06d49221460" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.938819 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.957504 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.970904 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 00:59:18 crc kubenswrapper[4884]: E1210 00:59:18.973750 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7343006c-fda9-4e2d-8767-41ee4412c601" containerName="rabbitmq" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.973774 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7343006c-fda9-4e2d-8767-41ee4412c601" containerName="rabbitmq" Dec 10 00:59:18 crc kubenswrapper[4884]: E1210 00:59:18.973815 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7343006c-fda9-4e2d-8767-41ee4412c601" containerName="setup-container" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.973821 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7343006c-fda9-4e2d-8767-41ee4412c601" containerName="setup-container" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.974021 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7343006c-fda9-4e2d-8767-41ee4412c601" containerName="rabbitmq" Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.975174 4884 util.go:30] "No sandbox for pod can be found. 
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.975174 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.977822 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.978007 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-88p9q"
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.985151 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.988910 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.989093 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.989286 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.989407 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Dec 10 00:59:18 crc kubenswrapper[4884]: I1210 00:59:18.989958 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.128608 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.128666 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.128689 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4z7x7\" (UniqueName: \"kubernetes.io/projected/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-kube-api-access-4z7x7\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.128760 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.128803 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-pod-info\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.128830 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-server-conf\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.128872 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.128896 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-config-data\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.128911 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.128928 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.128943 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.233789 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.234139 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-pod-info\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.234174 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-server-conf\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0"
pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.234246 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-config-data\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.234263 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.234280 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.234299 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.234323 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.234323 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.234356 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.234378 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4z7x7\" (UniqueName: \"kubernetes.io/projected/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-kube-api-access-4z7x7\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.236202 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-config-data\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.236451 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.237045 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.237351 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.237892 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-server-conf\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.251387 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.252653 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-pod-info\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.255762 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.255847 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.264176 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4z7x7\" (UniqueName: \"kubernetes.io/projected/834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4-kube-api-access-4z7x7\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.294157 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4\") " pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.308056 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="7343006c-fda9-4e2d-8767-41ee4412c601" path="/var/lib/kubelet/pods/7343006c-fda9-4e2d-8767-41ee4412c601/volumes" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.332274 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.372763 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.437731 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.437776 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-confd\") pod \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.437815 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dkvdf\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-kube-api-access-dkvdf\") pod \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.437835 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-plugins\") pod \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.437871 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-erlang-cookie\") pod \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.437924 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-pod-info\") pod \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.437940 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-erlang-cookie-secret\") pod \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.438000 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-tls\") pod \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.438033 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-config-data\") pod \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.438092 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-plugins-conf\") pod \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.438142 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-server-conf\") pod \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\" (UID: \"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93\") " Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.440173 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" (UID: "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.440388 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" (UID: "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.441011 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" (UID: "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.444395 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "persistence") pod "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" (UID: "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.445299 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-pod-info" (OuterVolumeSpecName: "pod-info") pod "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" (UID: "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.450964 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" (UID: "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.467086 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-kube-api-access-dkvdf" (OuterVolumeSpecName: "kube-api-access-dkvdf") pod "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" (UID: "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93"). InnerVolumeSpecName "kube-api-access-dkvdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.478939 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" (UID: "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.492140 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-config-data" (OuterVolumeSpecName: "config-data") pod "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" (UID: "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.523425 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-server-conf" (OuterVolumeSpecName: "server-conf") pod "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" (UID: "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.541101 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.541129 4884 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.541138 4884 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-server-conf\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.541160 4884 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.541169 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dkvdf\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-kube-api-access-dkvdf\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.541181 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.541190 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.541198 4884 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-pod-info\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.541207 4884 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.541215 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.565078 4884 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.617700 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" (UID: "6b8886f7-cc8b-4a23-bd53-c5e369e0cd93"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.643015 4884 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.643044 4884 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:19 crc kubenswrapper[4884]: W1210 00:59:19.900519 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod834c403c_b8cf_4cbc_ae4b_50a4dbe7ccc4.slice/crio-90ddaa5294bf718a1e1522732ccd7bb1f3c3fa84b9b030a3e78090de2047fbf5 WatchSource:0}: Error finding container 90ddaa5294bf718a1e1522732ccd7bb1f3c3fa84b9b030a3e78090de2047fbf5: Status 404 returned error can't find the container with id 90ddaa5294bf718a1e1522732ccd7bb1f3c3fa84b9b030a3e78090de2047fbf5 Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.901865 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.912686 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"6b8886f7-cc8b-4a23-bd53-c5e369e0cd93","Type":"ContainerDied","Data":"ed0536c73d783f21b855085e886a29c5fb2f3ed8b8ce675791e7d77388d6cefe"} Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.912732 4884 scope.go:117] "RemoveContainer" containerID="80aff531440e5df8fdf3ed19c2426ac5174da004004613a3abf3fa9c7b5e565d" Dec 10 00:59:19 crc kubenswrapper[4884]: I1210 00:59:19.912761 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.039588 4884 scope.go:117] "RemoveContainer" containerID="4e3d15fd0e5ae08056b22ae7da7e8668a86ba45b7eaea66648647fc03028403b" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.071989 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.082820 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.095958 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 00:59:20 crc kubenswrapper[4884]: E1210 00:59:20.096389 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" containerName="rabbitmq" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.096405 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" containerName="rabbitmq" Dec 10 00:59:20 crc kubenswrapper[4884]: E1210 00:59:20.096448 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" containerName="setup-container" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.096456 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" containerName="setup-container" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.096657 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" containerName="rabbitmq" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.097736 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.104486 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.104497 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.104678 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.104821 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-9czbh" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.106656 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.106701 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.106758 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.149905 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.257634 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.257678 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brhnq\" (UniqueName: \"kubernetes.io/projected/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-kube-api-access-brhnq\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.257700 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.257952 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.258006 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.258119 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.258234 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.258328 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.258369 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.258390 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.258513 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.361183 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.361243 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.361273 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.361311 4884 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.361421 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.361471 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brhnq\" (UniqueName: \"kubernetes.io/projected/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-kube-api-access-brhnq\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.361495 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.361567 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.361594 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.361651 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.361739 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.362018 4884 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.362473 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.362960 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.363226 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.363570 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.364905 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.366091 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.367003 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.367617 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.367814 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.386092 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brhnq\" (UniqueName: \"kubernetes.io/projected/1a7f3ac8-d53a-444b-94c8-0ea465ea74b8-kube-api-access-brhnq\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" 
Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.395642 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8\") " pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.452909 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.927014 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4","Type":"ContainerStarted","Data":"90ddaa5294bf718a1e1522732ccd7bb1f3c3fa84b9b030a3e78090de2047fbf5"} Dec 10 00:59:20 crc kubenswrapper[4884]: I1210 00:59:20.930716 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 10 00:59:20 crc kubenswrapper[4884]: W1210 00:59:20.974597 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1a7f3ac8_d53a_444b_94c8_0ea465ea74b8.slice/crio-13dd224acf24538611e3a9ecb7cdd41b73758aa3792573c0ad6bba65cce24e5c WatchSource:0}: Error finding container 13dd224acf24538611e3a9ecb7cdd41b73758aa3792573c0ad6bba65cce24e5c: Status 404 returned error can't find the container with id 13dd224acf24538611e3a9ecb7cdd41b73758aa3792573c0ad6bba65cce24e5c Dec 10 00:59:21 crc kubenswrapper[4884]: I1210 00:59:21.287271 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:59:21 crc kubenswrapper[4884]: E1210 00:59:21.287864 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:59:21 crc kubenswrapper[4884]: I1210 00:59:21.303625 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b8886f7-cc8b-4a23-bd53-c5e369e0cd93" path="/var/lib/kubelet/pods/6b8886f7-cc8b-4a23-bd53-c5e369e0cd93/volumes" Dec 10 00:59:21 crc kubenswrapper[4884]: I1210 00:59:21.942274 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4","Type":"ContainerStarted","Data":"51ce3b10b355711c9dae31a643913b5c3a63e3318c3b0a7ff8dd73768d3258e0"} Dec 10 00:59:21 crc kubenswrapper[4884]: I1210 00:59:21.943773 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8","Type":"ContainerStarted","Data":"13dd224acf24538611e3a9ecb7cdd41b73758aa3792573c0ad6bba65cce24e5c"} Dec 10 00:59:22 crc kubenswrapper[4884]: I1210 00:59:22.958134 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8","Type":"ContainerStarted","Data":"91926344c97f1f77f3cfe60f01d23407ccce7b5a5c8a97d94344bc1f4c8d09bd"} Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.137965 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-j9zcq"] Dec 10 00:59:23 crc 
kubenswrapper[4884]: I1210 00:59:23.139930 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.142761 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.156807 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-j9zcq"] Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.243261 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-dns-swift-storage-0\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.243319 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-ovsdbserver-sb\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.243360 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78wxs\" (UniqueName: \"kubernetes.io/projected/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-kube-api-access-78wxs\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.243385 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-config\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.243464 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-dns-svc\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.243512 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-openstack-edpm-ipam\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.243729 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-ovsdbserver-nb\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.346307 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-openstack-edpm-ipam\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.346524 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-ovsdbserver-nb\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.347126 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-openstack-edpm-ipam\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.347729 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-ovsdbserver-nb\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.347929 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-dns-swift-storage-0\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.348803 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-dns-swift-storage-0\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.348919 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-ovsdbserver-sb\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.349690 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-ovsdbserver-sb\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.350124 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78wxs\" (UniqueName: \"kubernetes.io/projected/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-kube-api-access-78wxs\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.350191 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-config\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.350977 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-config\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.351152 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-dns-svc\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.351715 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-dns-svc\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.374539 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78wxs\" (UniqueName: \"kubernetes.io/projected/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-kube-api-access-78wxs\") pod \"dnsmasq-dns-5b75489c6f-j9zcq\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: E1210 00:59:23.407539 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 00:59:23 crc kubenswrapper[4884]: E1210 00:59:23.407895 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 00:59:23 crc kubenswrapper[4884]: E1210 00:59:23.408063 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 00:59:23 crc kubenswrapper[4884]: E1210 00:59:23.409260 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.462264 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:23 crc kubenswrapper[4884]: I1210 00:59:23.976071 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-j9zcq"] Dec 10 00:59:23 crc kubenswrapper[4884]: W1210 00:59:23.978741 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee35f46e_5e1f_44a2_a4ad_83f2fb89dc86.slice/crio-d83108e74984ea9d2d62e220a5d79cd79046aae82dc8f3506907c7e50091cb9d WatchSource:0}: Error finding container d83108e74984ea9d2d62e220a5d79cd79046aae82dc8f3506907c7e50091cb9d: Status 404 returned error can't find the container with id d83108e74984ea9d2d62e220a5d79cd79046aae82dc8f3506907c7e50091cb9d Dec 10 00:59:24 crc kubenswrapper[4884]: I1210 00:59:24.981480 4884 generic.go:334] "Generic (PLEG): container finished" podID="ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" containerID="02a8bd079a117b44df02d29bb9f0b01a6f790e360e2d6f6e530f77fa90e7806b" exitCode=0 Dec 10 00:59:24 crc kubenswrapper[4884]: I1210 00:59:24.981550 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" event={"ID":"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86","Type":"ContainerDied","Data":"02a8bd079a117b44df02d29bb9f0b01a6f790e360e2d6f6e530f77fa90e7806b"} Dec 10 00:59:24 crc kubenswrapper[4884]: I1210 00:59:24.981792 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" event={"ID":"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86","Type":"ContainerStarted","Data":"d83108e74984ea9d2d62e220a5d79cd79046aae82dc8f3506907c7e50091cb9d"} Dec 10 00:59:25 crc kubenswrapper[4884]: I1210 00:59:25.999222 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" event={"ID":"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86","Type":"ContainerStarted","Data":"28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5"} Dec 10 00:59:26 crc kubenswrapper[4884]: I1210 00:59:25.999909 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:26 crc kubenswrapper[4884]: I1210 00:59:26.041841 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" podStartSLOduration=3.04181145 podStartE2EDuration="3.04181145s" podCreationTimestamp="2025-12-10 00:59:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:59:26.036895256 +0000 UTC m=+1739.114852463" watchObservedRunningTime="2025-12-10 00:59:26.04181145 +0000 UTC m=+1739.119768607" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.291487 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.317738 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 10 00:59:33 crc kubenswrapper[4884]: E1210 00:59:33.426045 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 00:59:33 crc kubenswrapper[4884]: E1210 00:59:33.426112 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 00:59:33 crc kubenswrapper[4884]: E1210 00:59:33.426245 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 00:59:33 crc kubenswrapper[4884]: E1210 00:59:33.427485 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.465792 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.549397 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-b4xhk"] Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.549664 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk" podUID="ecf73cb4-4dc2-4a18-a755-3c083c7ea571" containerName="dnsmasq-dns" containerID="cri-o://210db8a95b30465c4c9dc64069918bef6de762b64ebf7ecea2a087736e9ec629" gracePeriod=10 Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.717945 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk"] Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.723618 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.761887 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk"] Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.788169 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-config\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.788208 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mf86c\" (UniqueName: \"kubernetes.io/projected/2ee52c8c-feab-476d-844c-19e007cf6e40-kube-api-access-mf86c\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.788231 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-openstack-edpm-ipam\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.788248 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-ovsdbserver-nb\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: 
I1210 00:59:33.788949 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-ovsdbserver-sb\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.789023 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-dns-swift-storage-0\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.789069 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-dns-svc\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.890761 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-config\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.890809 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mf86c\" (UniqueName: \"kubernetes.io/projected/2ee52c8c-feab-476d-844c-19e007cf6e40-kube-api-access-mf86c\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.890830 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-openstack-edpm-ipam\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.890846 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-ovsdbserver-nb\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.890921 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-ovsdbserver-sb\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.890946 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-dns-swift-storage-0\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc 
kubenswrapper[4884]: I1210 00:59:33.890967 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-dns-svc\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.891769 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-config\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.891912 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-openstack-edpm-ipam\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.892945 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-dns-svc\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.893114 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-dns-swift-storage-0\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.894108 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-ovsdbserver-nb\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.894259 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ee52c8c-feab-476d-844c-19e007cf6e40-ovsdbserver-sb\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:33 crc kubenswrapper[4884]: I1210 00:59:33.917039 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mf86c\" (UniqueName: \"kubernetes.io/projected/2ee52c8c-feab-476d-844c-19e007cf6e40-kube-api-access-mf86c\") pod \"dnsmasq-dns-5cf7b6cbf7-7jxlk\" (UID: \"2ee52c8c-feab-476d-844c-19e007cf6e40\") " pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.060475 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.098827 4884 generic.go:334] "Generic (PLEG): container finished" podID="ecf73cb4-4dc2-4a18-a755-3c083c7ea571" containerID="210db8a95b30465c4c9dc64069918bef6de762b64ebf7ecea2a087736e9ec629" exitCode=0 Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.098915 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk" event={"ID":"ecf73cb4-4dc2-4a18-a755-3c083c7ea571","Type":"ContainerDied","Data":"210db8a95b30465c4c9dc64069918bef6de762b64ebf7ecea2a087736e9ec629"} Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.098970 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk" event={"ID":"ecf73cb4-4dc2-4a18-a755-3c083c7ea571","Type":"ContainerDied","Data":"6cc0c8c1dce67e89204df137c21bc62a55c0f36b86484a41c0653e008a863584"} Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.098980 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6cc0c8c1dce67e89204df137c21bc62a55c0f36b86484a41c0653e008a863584" Dec 10 00:59:34 crc kubenswrapper[4884]: E1210 00:59:34.099957 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.199028 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.304733 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-ovsdbserver-sb\") pod \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.305085 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-config\") pod \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.305131 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fb52z\" (UniqueName: \"kubernetes.io/projected/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-kube-api-access-fb52z\") pod \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.305165 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-dns-swift-storage-0\") pod \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.305234 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-dns-svc\") pod \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " Dec 10 00:59:34 crc 
kubenswrapper[4884]: I1210 00:59:34.305258 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-ovsdbserver-nb\") pod \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\" (UID: \"ecf73cb4-4dc2-4a18-a755-3c083c7ea571\") " Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.312713 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-kube-api-access-fb52z" (OuterVolumeSpecName: "kube-api-access-fb52z") pod "ecf73cb4-4dc2-4a18-a755-3c083c7ea571" (UID: "ecf73cb4-4dc2-4a18-a755-3c083c7ea571"). InnerVolumeSpecName "kube-api-access-fb52z". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.375216 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ecf73cb4-4dc2-4a18-a755-3c083c7ea571" (UID: "ecf73cb4-4dc2-4a18-a755-3c083c7ea571"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.379026 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ecf73cb4-4dc2-4a18-a755-3c083c7ea571" (UID: "ecf73cb4-4dc2-4a18-a755-3c083c7ea571"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.389110 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ecf73cb4-4dc2-4a18-a755-3c083c7ea571" (UID: "ecf73cb4-4dc2-4a18-a755-3c083c7ea571"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.394302 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-config" (OuterVolumeSpecName: "config") pod "ecf73cb4-4dc2-4a18-a755-3c083c7ea571" (UID: "ecf73cb4-4dc2-4a18-a755-3c083c7ea571"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.409122 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.409151 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.409161 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fb52z\" (UniqueName: \"kubernetes.io/projected/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-kube-api-access-fb52z\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.409170 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.409177 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.432131 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ecf73cb4-4dc2-4a18-a755-3c083c7ea571" (UID: "ecf73cb4-4dc2-4a18-a755-3c083c7ea571"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.511191 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ecf73cb4-4dc2-4a18-a755-3c083c7ea571-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:34 crc kubenswrapper[4884]: W1210 00:59:34.607099 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2ee52c8c_feab_476d_844c_19e007cf6e40.slice/crio-342f84e2771f712b71e19e9d4db47fccdb7ac6adc01f82692ec886ef582edf7e WatchSource:0}: Error finding container 342f84e2771f712b71e19e9d4db47fccdb7ac6adc01f82692ec886ef582edf7e: Status 404 returned error can't find the container with id 342f84e2771f712b71e19e9d4db47fccdb7ac6adc01f82692ec886ef582edf7e Dec 10 00:59:34 crc kubenswrapper[4884]: I1210 00:59:34.614277 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk"] Dec 10 00:59:35 crc kubenswrapper[4884]: I1210 00:59:35.112746 4884 generic.go:334] "Generic (PLEG): container finished" podID="2ee52c8c-feab-476d-844c-19e007cf6e40" containerID="bf3eca6126203b57e1398a848eaf0f9cc713189288cd60705e91c408a97f0ec3" exitCode=0 Dec 10 00:59:35 crc kubenswrapper[4884]: I1210 00:59:35.113122 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-b4xhk" Dec 10 00:59:35 crc kubenswrapper[4884]: I1210 00:59:35.112828 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" event={"ID":"2ee52c8c-feab-476d-844c-19e007cf6e40","Type":"ContainerDied","Data":"bf3eca6126203b57e1398a848eaf0f9cc713189288cd60705e91c408a97f0ec3"} Dec 10 00:59:35 crc kubenswrapper[4884]: I1210 00:59:35.113356 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" event={"ID":"2ee52c8c-feab-476d-844c-19e007cf6e40","Type":"ContainerStarted","Data":"342f84e2771f712b71e19e9d4db47fccdb7ac6adc01f82692ec886ef582edf7e"} Dec 10 00:59:35 crc kubenswrapper[4884]: I1210 00:59:35.287239 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:59:35 crc kubenswrapper[4884]: E1210 00:59:35.287500 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:59:35 crc kubenswrapper[4884]: I1210 00:59:35.366487 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-b4xhk"] Dec 10 00:59:35 crc kubenswrapper[4884]: I1210 00:59:35.375681 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-b4xhk"] Dec 10 00:59:36 crc kubenswrapper[4884]: I1210 00:59:36.130002 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" event={"ID":"2ee52c8c-feab-476d-844c-19e007cf6e40","Type":"ContainerStarted","Data":"552bf6be190750c280b027dfb88e825f7995b0856d13e0c93423067daeff3589"} Dec 10 00:59:36 crc kubenswrapper[4884]: I1210 00:59:36.130353 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:36 crc kubenswrapper[4884]: I1210 00:59:36.171467 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" podStartSLOduration=3.171400266 podStartE2EDuration="3.171400266s" podCreationTimestamp="2025-12-10 00:59:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:59:36.160985273 +0000 UTC m=+1749.238942480" watchObservedRunningTime="2025-12-10 00:59:36.171400266 +0000 UTC m=+1749.249357433" Dec 10 00:59:36 crc kubenswrapper[4884]: E1210 00:59:36.291681 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 00:59:37 crc kubenswrapper[4884]: I1210 00:59:37.312529 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ecf73cb4-4dc2-4a18-a755-3c083c7ea571" path="/var/lib/kubelet/pods/ecf73cb4-4dc2-4a18-a755-3c083c7ea571/volumes" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.063579 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/dnsmasq-dns-5cf7b6cbf7-7jxlk" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.168412 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-j9zcq"] Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.168681 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" podUID="ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" containerName="dnsmasq-dns" containerID="cri-o://28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5" gracePeriod=10 Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.682918 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.866187 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-openstack-edpm-ipam\") pod \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.866229 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-ovsdbserver-sb\") pod \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.866300 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78wxs\" (UniqueName: \"kubernetes.io/projected/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-kube-api-access-78wxs\") pod \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.866394 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-dns-swift-storage-0\") pod \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.866421 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-dns-svc\") pod \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.866553 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-ovsdbserver-nb\") pod \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.866610 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-config\") pod \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\" (UID: \"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86\") " Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.874696 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-kube-api-access-78wxs" (OuterVolumeSpecName: "kube-api-access-78wxs") pod "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" 
(UID: "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86"). InnerVolumeSpecName "kube-api-access-78wxs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.940382 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" (UID: "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.941031 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" (UID: "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.947075 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" (UID: "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.947379 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" (UID: "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.965668 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-config" (OuterVolumeSpecName: "config") pod "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" (UID: "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.969098 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78wxs\" (UniqueName: \"kubernetes.io/projected/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-kube-api-access-78wxs\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.969129 4884 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.969144 4884 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.969158 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.969170 4884 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-config\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:44 crc kubenswrapper[4884]: I1210 00:59:44.969181 4884 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:45 crc kubenswrapper[4884]: I1210 00:59:45.002228 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" (UID: "ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 00:59:45 crc kubenswrapper[4884]: I1210 00:59:45.071832 4884 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 10 00:59:45 crc kubenswrapper[4884]: I1210 00:59:45.264848 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 00:59:45 crc kubenswrapper[4884]: I1210 00:59:45.264856 4884 generic.go:334] "Generic (PLEG): container finished" podID="ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" containerID="28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5" exitCode=0 Dec 10 00:59:45 crc kubenswrapper[4884]: I1210 00:59:45.264889 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" event={"ID":"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86","Type":"ContainerDied","Data":"28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5"} Dec 10 00:59:45 crc kubenswrapper[4884]: I1210 00:59:45.265613 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" event={"ID":"ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86","Type":"ContainerDied","Data":"d83108e74984ea9d2d62e220a5d79cd79046aae82dc8f3506907c7e50091cb9d"} Dec 10 00:59:45 crc kubenswrapper[4884]: I1210 00:59:45.265663 4884 scope.go:117] "RemoveContainer" containerID="28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5" Dec 10 00:59:45 crc kubenswrapper[4884]: E1210 00:59:45.288740 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 00:59:45 crc kubenswrapper[4884]: I1210 00:59:45.353499 4884 scope.go:117] "RemoveContainer" containerID="02a8bd079a117b44df02d29bb9f0b01a6f790e360e2d6f6e530f77fa90e7806b" Dec 10 00:59:45 crc kubenswrapper[4884]: I1210 00:59:45.380110 4884 scope.go:117] "RemoveContainer" containerID="28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5" Dec 10 00:59:45 crc kubenswrapper[4884]: E1210 00:59:45.380565 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5\": container with ID starting with 28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5 not found: ID does not exist" containerID="28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5" Dec 10 00:59:45 crc kubenswrapper[4884]: I1210 00:59:45.380603 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5"} err="failed to get container status \"28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5\": rpc error: code = NotFound desc = could not find container \"28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5\": container with ID starting with 28b7ac3542293343c0d52d471b22604060d8d32868489635ee8b690ff8c61fa5 not found: ID does not exist" Dec 10 00:59:45 crc kubenswrapper[4884]: I1210 00:59:45.380636 4884 scope.go:117] "RemoveContainer" containerID="02a8bd079a117b44df02d29bb9f0b01a6f790e360e2d6f6e530f77fa90e7806b" Dec 10 00:59:45 crc kubenswrapper[4884]: E1210 00:59:45.381010 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02a8bd079a117b44df02d29bb9f0b01a6f790e360e2d6f6e530f77fa90e7806b\": container with ID starting with 02a8bd079a117b44df02d29bb9f0b01a6f790e360e2d6f6e530f77fa90e7806b not found: ID does not exist" 
containerID="02a8bd079a117b44df02d29bb9f0b01a6f790e360e2d6f6e530f77fa90e7806b" Dec 10 00:59:45 crc kubenswrapper[4884]: I1210 00:59:45.381038 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02a8bd079a117b44df02d29bb9f0b01a6f790e360e2d6f6e530f77fa90e7806b"} err="failed to get container status \"02a8bd079a117b44df02d29bb9f0b01a6f790e360e2d6f6e530f77fa90e7806b\": rpc error: code = NotFound desc = could not find container \"02a8bd079a117b44df02d29bb9f0b01a6f790e360e2d6f6e530f77fa90e7806b\": container with ID starting with 02a8bd079a117b44df02d29bb9f0b01a6f790e360e2d6f6e530f77fa90e7806b not found: ID does not exist" Dec 10 00:59:46 crc kubenswrapper[4884]: I1210 00:59:46.845922 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 00:59:46 crc kubenswrapper[4884]: E1210 00:59:46.846185 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 00:59:48 crc kubenswrapper[4884]: E1210 00:59:48.409888 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 00:59:48 crc kubenswrapper[4884]: E1210 00:59:48.410463 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 00:59:48 crc kubenswrapper[4884]: E1210 00:59:48.410595 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 00:59:48 crc kubenswrapper[4884]: E1210 00:59:48.411879 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 00:59:54 crc kubenswrapper[4884]: I1210 00:59:54.021424 4884 generic.go:334] "Generic (PLEG): container finished" podID="834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4" containerID="51ce3b10b355711c9dae31a643913b5c3a63e3318c3b0a7ff8dd73768d3258e0" exitCode=0 Dec 10 00:59:54 crc kubenswrapper[4884]: I1210 00:59:54.021519 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4","Type":"ContainerDied","Data":"51ce3b10b355711c9dae31a643913b5c3a63e3318c3b0a7ff8dd73768d3258e0"} Dec 10 00:59:55 crc kubenswrapper[4884]: I1210 00:59:55.035098 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4","Type":"ContainerStarted","Data":"ed8bf7c34958c7869ee950907afc25ec32084d1e9a930a896227abe50a6d727c"} Dec 10 00:59:55 crc kubenswrapper[4884]: I1210 00:59:55.036420 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 10 00:59:55 crc kubenswrapper[4884]: I1210 00:59:55.036861 4884 generic.go:334] "Generic (PLEG): container finished" podID="1a7f3ac8-d53a-444b-94c8-0ea465ea74b8" containerID="91926344c97f1f77f3cfe60f01d23407ccce7b5a5c8a97d94344bc1f4c8d09bd" exitCode=0 Dec 10 00:59:55 crc kubenswrapper[4884]: I1210 00:59:55.036902 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8","Type":"ContainerDied","Data":"91926344c97f1f77f3cfe60f01d23407ccce7b5a5c8a97d94344bc1f4c8d09bd"} Dec 10 00:59:55 crc kubenswrapper[4884]: I1210 00:59:55.067839 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.067825296 podStartE2EDuration="37.067825296s" podCreationTimestamp="2025-12-10 00:59:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:59:55.06578422 +0000 UTC m=+1768.143741347" watchObservedRunningTime="2025-12-10 00:59:55.067825296 +0000 UTC m=+1768.145782413" Dec 10 00:59:56 crc kubenswrapper[4884]: I1210 00:59:56.048801 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1a7f3ac8-d53a-444b-94c8-0ea465ea74b8","Type":"ContainerStarted","Data":"49dac579e72b74fa4ec718abb07b266574dd488076808cf2ce22b4b751e4c324"} Dec 10 00:59:56 crc kubenswrapper[4884]: I1210 00:59:56.049373 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 10 00:59:56 crc kubenswrapper[4884]: I1210 00:59:56.089515 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.089492428 podStartE2EDuration="36.089492428s" podCreationTimestamp="2025-12-10 00:59:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 00:59:56.074326326 +0000 UTC m=+1769.152283473" watchObservedRunningTime="2025-12-10 00:59:56.089492428 +0000 UTC m=+1769.167449555" Dec 10 00:59:56 crc kubenswrapper[4884]: E1210 00:59:56.411028 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source 
docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 00:59:56 crc kubenswrapper[4884]: E1210 00:59:56.411097 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 00:59:56 crc kubenswrapper[4884]: E1210 00:59:56.411226 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source 
docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 00:59:56 crc kubenswrapper[4884]: E1210 00:59:56.412563 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.375556 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp"] Dec 10 00:59:57 crc kubenswrapper[4884]: E1210 00:59:57.376204 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecf73cb4-4dc2-4a18-a755-3c083c7ea571" containerName="dnsmasq-dns" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.376217 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecf73cb4-4dc2-4a18-a755-3c083c7ea571" containerName="dnsmasq-dns" Dec 10 00:59:57 crc kubenswrapper[4884]: E1210 00:59:57.376242 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecf73cb4-4dc2-4a18-a755-3c083c7ea571" containerName="init" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.376251 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecf73cb4-4dc2-4a18-a755-3c083c7ea571" containerName="init" Dec 10 00:59:57 crc kubenswrapper[4884]: E1210 00:59:57.376264 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" containerName="dnsmasq-dns" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.376273 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" containerName="dnsmasq-dns" Dec 10 00:59:57 crc kubenswrapper[4884]: E1210 00:59:57.376304 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" containerName="init" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.376310 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" containerName="init" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.378150 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" containerName="dnsmasq-dns" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.378202 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecf73cb4-4dc2-4a18-a755-3c083c7ea571" containerName="dnsmasq-dns" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.379090 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.381325 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.384173 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.384305 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.384419 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.397786 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp"] Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.496318 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.496369 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2m84h\" (UniqueName: \"kubernetes.io/projected/357a1ded-5f10-42db-a1d4-ed63c8297d3b-kube-api-access-2m84h\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.496418 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.496499 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.598162 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.598293 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-inventory\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.598322 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2m84h\" (UniqueName: \"kubernetes.io/projected/357a1ded-5f10-42db-a1d4-ed63c8297d3b-kube-api-access-2m84h\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.598366 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.606947 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.611030 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.612920 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.625290 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2m84h\" (UniqueName: \"kubernetes.io/projected/357a1ded-5f10-42db-a1d4-ed63c8297d3b-kube-api-access-2m84h\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:57 crc kubenswrapper[4884]: I1210 00:59:57.697300 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 00:59:58 crc kubenswrapper[4884]: I1210 00:59:58.293963 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp"] Dec 10 00:59:59 crc kubenswrapper[4884]: I1210 00:59:59.111878 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" event={"ID":"357a1ded-5f10-42db-a1d4-ed63c8297d3b","Type":"ContainerStarted","Data":"331eadbeae5a7c1af18ddc4051d16235f2840d1539cd88b5dcbe316bcba81304"} Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.136208 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2"] Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.141509 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.145486 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2"] Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.147297 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.147584 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.264981 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfl8z\" (UniqueName: \"kubernetes.io/projected/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-kube-api-access-lfl8z\") pod \"collect-profiles-29422140-thtq2\" (UID: \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.265171 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-config-volume\") pod \"collect-profiles-29422140-thtq2\" (UID: \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.265479 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-secret-volume\") pod \"collect-profiles-29422140-thtq2\" (UID: \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.287730 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 01:00:00 crc kubenswrapper[4884]: E1210 01:00:00.288406 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:00:00 crc kubenswrapper[4884]: E1210 01:00:00.293076 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.367548 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-secret-volume\") pod \"collect-profiles-29422140-thtq2\" (UID: \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.367862 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfl8z\" (UniqueName: \"kubernetes.io/projected/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-kube-api-access-lfl8z\") pod \"collect-profiles-29422140-thtq2\" (UID: \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.367950 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-config-volume\") pod \"collect-profiles-29422140-thtq2\" (UID: \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.369241 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-config-volume\") pod \"collect-profiles-29422140-thtq2\" (UID: \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.386266 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfl8z\" (UniqueName: \"kubernetes.io/projected/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-kube-api-access-lfl8z\") pod \"collect-profiles-29422140-thtq2\" (UID: \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.387111 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-secret-volume\") pod \"collect-profiles-29422140-thtq2\" (UID: \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:00 crc kubenswrapper[4884]: I1210 01:00:00.475581 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:08 crc kubenswrapper[4884]: I1210 01:00:08.233376 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" event={"ID":"357a1ded-5f10-42db-a1d4-ed63c8297d3b","Type":"ContainerStarted","Data":"4c1bbea500ebeb9170481600cd805bcb57d63fe551394ca5730b60fc2a2e1522"} Dec 10 01:00:08 crc kubenswrapper[4884]: I1210 01:00:08.259258 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" podStartSLOduration=1.744743278 podStartE2EDuration="11.259238423s" podCreationTimestamp="2025-12-10 00:59:57 +0000 UTC" firstStartedPulling="2025-12-10 00:59:58.290775735 +0000 UTC m=+1771.368732852" lastFinishedPulling="2025-12-10 01:00:07.80527088 +0000 UTC m=+1780.883227997" observedRunningTime="2025-12-10 01:00:08.257190047 +0000 UTC m=+1781.335147174" watchObservedRunningTime="2025-12-10 01:00:08.259238423 +0000 UTC m=+1781.337195550" Dec 10 01:00:08 crc kubenswrapper[4884]: E1210 01:00:08.289414 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:00:08 crc kubenswrapper[4884]: I1210 01:00:08.432968 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2"] Dec 10 01:00:09 crc kubenswrapper[4884]: I1210 01:00:09.255172 4884 generic.go:334] "Generic (PLEG): container finished" podID="e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823" containerID="1fed379f7bc6284328a126c0c54af4f8c812a3f1b854cdd0b3c607bb245cbfac" exitCode=0 Dec 10 01:00:09 crc kubenswrapper[4884]: I1210 01:00:09.255318 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" event={"ID":"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823","Type":"ContainerDied","Data":"1fed379f7bc6284328a126c0c54af4f8c812a3f1b854cdd0b3c607bb245cbfac"} Dec 10 01:00:09 crc kubenswrapper[4884]: I1210 01:00:09.255492 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" event={"ID":"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823","Type":"ContainerStarted","Data":"6fd91ca5d8eaef3cfa4c62c207d98e6d95918ef31d2a531822de193c59338ae4"} Dec 10 01:00:09 crc kubenswrapper[4884]: I1210 01:00:09.376626 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 10 01:00:10 crc kubenswrapper[4884]: I1210 01:00:10.458796 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 10 01:00:10 crc kubenswrapper[4884]: I1210 01:00:10.685256 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:10 crc kubenswrapper[4884]: I1210 01:00:10.827372 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-secret-volume\") pod \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\" (UID: \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\") " Dec 10 01:00:10 crc kubenswrapper[4884]: I1210 01:00:10.827730 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-config-volume\") pod \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\" (UID: \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\") " Dec 10 01:00:10 crc kubenswrapper[4884]: I1210 01:00:10.827970 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfl8z\" (UniqueName: \"kubernetes.io/projected/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-kube-api-access-lfl8z\") pod \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\" (UID: \"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823\") " Dec 10 01:00:10 crc kubenswrapper[4884]: I1210 01:00:10.828297 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-config-volume" (OuterVolumeSpecName: "config-volume") pod "e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823" (UID: "e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 01:00:10 crc kubenswrapper[4884]: I1210 01:00:10.828959 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 01:00:10 crc kubenswrapper[4884]: I1210 01:00:10.834667 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-kube-api-access-lfl8z" (OuterVolumeSpecName: "kube-api-access-lfl8z") pod "e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823" (UID: "e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823"). InnerVolumeSpecName "kube-api-access-lfl8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:00:10 crc kubenswrapper[4884]: I1210 01:00:10.839128 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823" (UID: "e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:00:10 crc kubenswrapper[4884]: I1210 01:00:10.931621 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 01:00:10 crc kubenswrapper[4884]: I1210 01:00:10.931663 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfl8z\" (UniqueName: \"kubernetes.io/projected/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823-kube-api-access-lfl8z\") on node \"crc\" DevicePath \"\"" Dec 10 01:00:11 crc kubenswrapper[4884]: I1210 01:00:11.276292 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" event={"ID":"e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823","Type":"ContainerDied","Data":"6fd91ca5d8eaef3cfa4c62c207d98e6d95918ef31d2a531822de193c59338ae4"} Dec 10 01:00:11 crc kubenswrapper[4884]: I1210 01:00:11.276330 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6fd91ca5d8eaef3cfa4c62c207d98e6d95918ef31d2a531822de193c59338ae4" Dec 10 01:00:11 crc kubenswrapper[4884]: I1210 01:00:11.276360 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2" Dec 10 01:00:11 crc kubenswrapper[4884]: I1210 01:00:11.287381 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 01:00:11 crc kubenswrapper[4884]: E1210 01:00:11.287818 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:00:11 crc kubenswrapper[4884]: E1210 01:00:11.290237 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:00:15 crc kubenswrapper[4884]: I1210 01:00:15.333570 4884 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podee35f46e-5e1f-44a2-a4ad-83f2fb89dc86"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podee35f46e-5e1f-44a2-a4ad-83f2fb89dc86] : Timed out while waiting for systemd to remove kubepods-besteffort-podee35f46e_5e1f_44a2_a4ad_83f2fb89dc86.slice" Dec 10 01:00:15 crc kubenswrapper[4884]: E1210 01:00:15.334037 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort podee35f46e-5e1f-44a2-a4ad-83f2fb89dc86] : unable to destroy cgroup paths for cgroup [kubepods besteffort podee35f46e-5e1f-44a2-a4ad-83f2fb89dc86] : Timed out while waiting for systemd to remove kubepods-besteffort-podee35f46e_5e1f_44a2_a4ad_83f2fb89dc86.slice" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" podUID="ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" Dec 10 01:00:15 crc kubenswrapper[4884]: I1210 01:00:15.351014 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-j9zcq" Dec 10 01:00:15 crc kubenswrapper[4884]: I1210 01:00:15.386983 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-j9zcq"] Dec 10 01:00:15 crc kubenswrapper[4884]: I1210 01:00:15.399257 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-j9zcq"] Dec 10 01:00:17 crc kubenswrapper[4884]: I1210 01:00:17.303022 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86" path="/var/lib/kubelet/pods/ee35f46e-5e1f-44a2-a4ad-83f2fb89dc86/volumes" Dec 10 01:00:20 crc kubenswrapper[4884]: I1210 01:00:20.417692 4884 generic.go:334] "Generic (PLEG): container finished" podID="357a1ded-5f10-42db-a1d4-ed63c8297d3b" containerID="4c1bbea500ebeb9170481600cd805bcb57d63fe551394ca5730b60fc2a2e1522" exitCode=0 Dec 10 01:00:20 crc kubenswrapper[4884]: I1210 01:00:20.417755 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" event={"ID":"357a1ded-5f10-42db-a1d4-ed63c8297d3b","Type":"ContainerDied","Data":"4c1bbea500ebeb9170481600cd805bcb57d63fe551394ca5730b60fc2a2e1522"} Dec 10 01:00:21 crc kubenswrapper[4884]: E1210 01:00:21.289517 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.013213 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.033533 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-inventory\") pod \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.033625 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-repo-setup-combined-ca-bundle\") pod \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.033841 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-ssh-key\") pod \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.033885 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2m84h\" (UniqueName: \"kubernetes.io/projected/357a1ded-5f10-42db-a1d4-ed63c8297d3b-kube-api-access-2m84h\") pod \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\" (UID: \"357a1ded-5f10-42db-a1d4-ed63c8297d3b\") " Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.042055 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: 
"repo-setup-combined-ca-bundle") pod "357a1ded-5f10-42db-a1d4-ed63c8297d3b" (UID: "357a1ded-5f10-42db-a1d4-ed63c8297d3b"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.046170 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/357a1ded-5f10-42db-a1d4-ed63c8297d3b-kube-api-access-2m84h" (OuterVolumeSpecName: "kube-api-access-2m84h") pod "357a1ded-5f10-42db-a1d4-ed63c8297d3b" (UID: "357a1ded-5f10-42db-a1d4-ed63c8297d3b"). InnerVolumeSpecName "kube-api-access-2m84h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.091061 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-inventory" (OuterVolumeSpecName: "inventory") pod "357a1ded-5f10-42db-a1d4-ed63c8297d3b" (UID: "357a1ded-5f10-42db-a1d4-ed63c8297d3b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.092782 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "357a1ded-5f10-42db-a1d4-ed63c8297d3b" (UID: "357a1ded-5f10-42db-a1d4-ed63c8297d3b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.135972 4884 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.136146 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.136237 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2m84h\" (UniqueName: \"kubernetes.io/projected/357a1ded-5f10-42db-a1d4-ed63c8297d3b-kube-api-access-2m84h\") on node \"crc\" DevicePath \"\"" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.136351 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/357a1ded-5f10-42db-a1d4-ed63c8297d3b-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.461292 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" event={"ID":"357a1ded-5f10-42db-a1d4-ed63c8297d3b","Type":"ContainerDied","Data":"331eadbeae5a7c1af18ddc4051d16235f2840d1539cd88b5dcbe316bcba81304"} Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.461363 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="331eadbeae5a7c1af18ddc4051d16235f2840d1539cd88b5dcbe316bcba81304" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.461417 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.555332 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb"] Dec 10 01:00:22 crc kubenswrapper[4884]: E1210 01:00:22.555832 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="357a1ded-5f10-42db-a1d4-ed63c8297d3b" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.555853 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="357a1ded-5f10-42db-a1d4-ed63c8297d3b" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 10 01:00:22 crc kubenswrapper[4884]: E1210 01:00:22.555875 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823" containerName="collect-profiles" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.555884 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823" containerName="collect-profiles" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.556145 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="357a1ded-5f10-42db-a1d4-ed63c8297d3b" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.556175 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823" containerName="collect-profiles" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.557233 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.561197 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.562629 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.565019 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.565104 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.582491 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb"] Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.646199 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.646298 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " 
pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.646361 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pxcg\" (UniqueName: \"kubernetes.io/projected/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-kube-api-access-7pxcg\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.646611 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.748734 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.748833 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.748891 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.748935 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pxcg\" (UniqueName: \"kubernetes.io/projected/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-kube-api-access-7pxcg\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.755912 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.756214 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 
01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.756330 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.782425 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pxcg\" (UniqueName: \"kubernetes.io/projected/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-kube-api-access-7pxcg\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:22 crc kubenswrapper[4884]: I1210 01:00:22.878604 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:00:23 crc kubenswrapper[4884]: I1210 01:00:23.287574 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 01:00:23 crc kubenswrapper[4884]: E1210 01:00:23.287925 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:00:23 crc kubenswrapper[4884]: I1210 01:00:23.508640 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb"] Dec 10 01:00:24 crc kubenswrapper[4884]: E1210 01:00:24.289761 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:00:24 crc kubenswrapper[4884]: I1210 01:00:24.488484 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" event={"ID":"1a04a509-b2c8-4fd6-8443-24aa317b7eb8","Type":"ContainerStarted","Data":"f0186d31d7aa4c0bc651088601688cde1a0fe4f1b3ab7c26135e5c9d4b1de61a"} Dec 10 01:00:26 crc kubenswrapper[4884]: I1210 01:00:26.519674 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" event={"ID":"1a04a509-b2c8-4fd6-8443-24aa317b7eb8","Type":"ContainerStarted","Data":"06907428237f5a80f36c1cfd2e94f12e614023d8ab86935144889002f058ceaa"} Dec 10 01:00:26 crc kubenswrapper[4884]: I1210 01:00:26.550139 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" podStartSLOduration=2.417544518 podStartE2EDuration="4.550112523s" podCreationTimestamp="2025-12-10 01:00:22 +0000 UTC" firstStartedPulling="2025-12-10 01:00:23.518692249 +0000 UTC m=+1796.596649376" lastFinishedPulling="2025-12-10 01:00:25.651260264 +0000 UTC m=+1798.729217381" observedRunningTime="2025-12-10 
01:00:26.54707717 +0000 UTC m=+1799.625034307" watchObservedRunningTime="2025-12-10 01:00:26.550112523 +0000 UTC m=+1799.628069670" Dec 10 01:00:34 crc kubenswrapper[4884]: I1210 01:00:34.286933 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 01:00:34 crc kubenswrapper[4884]: E1210 01:00:34.288013 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:00:36 crc kubenswrapper[4884]: E1210 01:00:36.291543 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:00:37 crc kubenswrapper[4884]: E1210 01:00:37.447742 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:00:37 crc kubenswrapper[4884]: E1210 01:00:37.448213 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:00:37 crc kubenswrapper[4884]: E1210 01:00:37.448420 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:00:37 crc kubenswrapper[4884]: E1210 01:00:37.449745 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:00:38 crc kubenswrapper[4884]: I1210 01:00:38.265556 4884 scope.go:117] "RemoveContainer" containerID="e2c3c1f94be9b1b02accfe5e6b6384942ba2fedea4b7602541e3ffaaa879fcac" Dec 10 01:00:38 crc kubenswrapper[4884]: I1210 01:00:38.315141 4884 scope.go:117] "RemoveContainer" containerID="ca6b9c1be04a3c343b8791a5d25231a9ffbb990934461979bc2e463f071ad1f0" Dec 10 01:00:45 crc kubenswrapper[4884]: I1210 01:00:45.287365 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 01:00:45 crc kubenswrapper[4884]: E1210 01:00:45.288454 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:00:49 crc kubenswrapper[4884]: E1210 01:00:49.393039 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:00:49 crc kubenswrapper[4884]: E1210 01:00:49.394037 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:00:49 crc kubenswrapper[4884]: E1210 01:00:49.394226 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:00:49 crc kubenswrapper[4884]: E1210 01:00:49.395957 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:00:51 crc kubenswrapper[4884]: E1210 01:00:51.289793 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:00:57 crc kubenswrapper[4884]: I1210 01:00:57.294925 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 01:00:57 crc kubenswrapper[4884]: E1210 01:00:57.296805 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.160866 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29422141-fmvp7"] Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.162799 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.171717 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29422141-fmvp7"] Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.285684 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-combined-ca-bundle\") pod \"keystone-cron-29422141-fmvp7\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.285905 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5cd6\" (UniqueName: \"kubernetes.io/projected/7c8db774-d53a-4a89-87fa-9863b4b73e5a-kube-api-access-k5cd6\") pod \"keystone-cron-29422141-fmvp7\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.285988 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-fernet-keys\") pod \"keystone-cron-29422141-fmvp7\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.286022 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-config-data\") pod \"keystone-cron-29422141-fmvp7\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.388058 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5cd6\" (UniqueName: 
\"kubernetes.io/projected/7c8db774-d53a-4a89-87fa-9863b4b73e5a-kube-api-access-k5cd6\") pod \"keystone-cron-29422141-fmvp7\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.388140 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-fernet-keys\") pod \"keystone-cron-29422141-fmvp7\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.388165 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-config-data\") pod \"keystone-cron-29422141-fmvp7\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.388202 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-combined-ca-bundle\") pod \"keystone-cron-29422141-fmvp7\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.400236 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-fernet-keys\") pod \"keystone-cron-29422141-fmvp7\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.401625 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-config-data\") pod \"keystone-cron-29422141-fmvp7\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.401701 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-combined-ca-bundle\") pod \"keystone-cron-29422141-fmvp7\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.403118 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5cd6\" (UniqueName: \"kubernetes.io/projected/7c8db774-d53a-4a89-87fa-9863b4b73e5a-kube-api-access-k5cd6\") pod \"keystone-cron-29422141-fmvp7\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.485784 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:00 crc kubenswrapper[4884]: I1210 01:01:00.968750 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29422141-fmvp7"] Dec 10 01:01:01 crc kubenswrapper[4884]: I1210 01:01:01.963013 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422141-fmvp7" event={"ID":"7c8db774-d53a-4a89-87fa-9863b4b73e5a","Type":"ContainerStarted","Data":"335d82f2995139652d80b4c7a9f64ddcb1ec2853077aa89b701cc31d0bdfdbd4"} Dec 10 01:01:01 crc kubenswrapper[4884]: I1210 01:01:01.964357 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422141-fmvp7" event={"ID":"7c8db774-d53a-4a89-87fa-9863b4b73e5a","Type":"ContainerStarted","Data":"65ce6ae7e30bdbd6b8c9606188267e5a69212081b140347b72232b78cb0978bd"} Dec 10 01:01:02 crc kubenswrapper[4884]: I1210 01:01:01.996196 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29422141-fmvp7" podStartSLOduration=1.9961701029999999 podStartE2EDuration="1.996170103s" podCreationTimestamp="2025-12-10 01:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 01:01:01.988890064 +0000 UTC m=+1835.066847201" watchObservedRunningTime="2025-12-10 01:01:01.996170103 +0000 UTC m=+1835.074127220" Dec 10 01:01:02 crc kubenswrapper[4884]: E1210 01:01:02.290785 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:01:03 crc kubenswrapper[4884]: I1210 01:01:03.986362 4884 generic.go:334] "Generic (PLEG): container finished" podID="7c8db774-d53a-4a89-87fa-9863b4b73e5a" containerID="335d82f2995139652d80b4c7a9f64ddcb1ec2853077aa89b701cc31d0bdfdbd4" exitCode=0 Dec 10 01:01:03 crc kubenswrapper[4884]: I1210 01:01:03.986574 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422141-fmvp7" event={"ID":"7c8db774-d53a-4a89-87fa-9863b4b73e5a","Type":"ContainerDied","Data":"335d82f2995139652d80b4c7a9f64ddcb1ec2853077aa89b701cc31d0bdfdbd4"} Dec 10 01:01:05 crc kubenswrapper[4884]: E1210 01:01:05.290754 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.492655 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.617263 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-config-data\") pod \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.617337 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-combined-ca-bundle\") pod \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.617526 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5cd6\" (UniqueName: \"kubernetes.io/projected/7c8db774-d53a-4a89-87fa-9863b4b73e5a-kube-api-access-k5cd6\") pod \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.617586 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-fernet-keys\") pod \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\" (UID: \"7c8db774-d53a-4a89-87fa-9863b4b73e5a\") " Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.630731 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7c8db774-d53a-4a89-87fa-9863b4b73e5a" (UID: "7c8db774-d53a-4a89-87fa-9863b4b73e5a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.636632 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c8db774-d53a-4a89-87fa-9863b4b73e5a-kube-api-access-k5cd6" (OuterVolumeSpecName: "kube-api-access-k5cd6") pod "7c8db774-d53a-4a89-87fa-9863b4b73e5a" (UID: "7c8db774-d53a-4a89-87fa-9863b4b73e5a"). InnerVolumeSpecName "kube-api-access-k5cd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.662696 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c8db774-d53a-4a89-87fa-9863b4b73e5a" (UID: "7c8db774-d53a-4a89-87fa-9863b4b73e5a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.699635 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-config-data" (OuterVolumeSpecName: "config-data") pod "7c8db774-d53a-4a89-87fa-9863b4b73e5a" (UID: "7c8db774-d53a-4a89-87fa-9863b4b73e5a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.720783 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5cd6\" (UniqueName: \"kubernetes.io/projected/7c8db774-d53a-4a89-87fa-9863b4b73e5a-kube-api-access-k5cd6\") on node \"crc\" DevicePath \"\"" Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.720833 4884 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.720851 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 01:01:05 crc kubenswrapper[4884]: I1210 01:01:05.720866 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c8db774-d53a-4a89-87fa-9863b4b73e5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:01:06 crc kubenswrapper[4884]: I1210 01:01:06.014008 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422141-fmvp7" event={"ID":"7c8db774-d53a-4a89-87fa-9863b4b73e5a","Type":"ContainerDied","Data":"65ce6ae7e30bdbd6b8c9606188267e5a69212081b140347b72232b78cb0978bd"} Dec 10 01:01:06 crc kubenswrapper[4884]: I1210 01:01:06.014065 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65ce6ae7e30bdbd6b8c9606188267e5a69212081b140347b72232b78cb0978bd" Dec 10 01:01:06 crc kubenswrapper[4884]: I1210 01:01:06.014071 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29422141-fmvp7" Dec 10 01:01:11 crc kubenswrapper[4884]: I1210 01:01:11.288588 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 01:01:11 crc kubenswrapper[4884]: E1210 01:01:11.290951 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:01:16 crc kubenswrapper[4884]: E1210 01:01:16.292099 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:01:18 crc kubenswrapper[4884]: E1210 01:01:18.291690 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:01:24 crc kubenswrapper[4884]: I1210 01:01:24.287908 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 01:01:25 crc kubenswrapper[4884]: I1210 
01:01:25.314033 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"5e4d50ba947a172672504a4b76d8d8687fb7bd5358cadb2fdb3ee9a59857b81c"} Dec 10 01:01:31 crc kubenswrapper[4884]: E1210 01:01:31.290633 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:01:31 crc kubenswrapper[4884]: E1210 01:01:31.290818 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:01:38 crc kubenswrapper[4884]: I1210 01:01:38.491381 4884 scope.go:117] "RemoveContainer" containerID="662cdfc7178497f3ca9035f4105b066b98bed00449bc4aaed874f1bd12f203df" Dec 10 01:01:38 crc kubenswrapper[4884]: I1210 01:01:38.546693 4884 scope.go:117] "RemoveContainer" containerID="637198d9f6a098982ca7f5166f8dbe4b0d6a2c4fbdebbfce96ebaa7f1fb17b94" Dec 10 01:01:38 crc kubenswrapper[4884]: I1210 01:01:38.583194 4884 scope.go:117] "RemoveContainer" containerID="582e1e7e5f16e9efe27f36d54d6a8dd1682fddfd64100ac5ba7e44b064c3bea0" Dec 10 01:01:42 crc kubenswrapper[4884]: E1210 01:01:42.290748 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:01:46 crc kubenswrapper[4884]: E1210 01:01:46.290445 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:01:56 crc kubenswrapper[4884]: E1210 01:01:56.290684 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:02:00 crc kubenswrapper[4884]: E1210 01:02:00.416932 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:02:00 crc kubenswrapper[4884]: E1210 01:02:00.417525 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:02:00 crc kubenswrapper[4884]: E1210 01:02:00.417684 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 01:02:00 crc kubenswrapper[4884]: E1210 01:02:00.418941 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.061472 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fczl5"] Dec 10 01:02:08 crc kubenswrapper[4884]: E1210 01:02:08.062647 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c8db774-d53a-4a89-87fa-9863b4b73e5a" containerName="keystone-cron" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.062666 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c8db774-d53a-4a89-87fa-9863b4b73e5a" containerName="keystone-cron" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.062944 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c8db774-d53a-4a89-87fa-9863b4b73e5a" containerName="keystone-cron" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.064865 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.078637 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fczl5"] Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.156291 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqxjw\" (UniqueName: \"kubernetes.io/projected/c16241b1-ffcb-4974-8056-a5aa2166dcfb-kube-api-access-rqxjw\") pod \"redhat-marketplace-fczl5\" (UID: \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\") " pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.156336 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c16241b1-ffcb-4974-8056-a5aa2166dcfb-utilities\") pod \"redhat-marketplace-fczl5\" (UID: \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\") " pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.156370 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c16241b1-ffcb-4974-8056-a5aa2166dcfb-catalog-content\") pod \"redhat-marketplace-fczl5\" (UID: \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\") " pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.258806 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqxjw\" (UniqueName: \"kubernetes.io/projected/c16241b1-ffcb-4974-8056-a5aa2166dcfb-kube-api-access-rqxjw\") pod \"redhat-marketplace-fczl5\" (UID: \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\") " pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.258844 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c16241b1-ffcb-4974-8056-a5aa2166dcfb-utilities\") pod \"redhat-marketplace-fczl5\" (UID: \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\") " pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.258867 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c16241b1-ffcb-4974-8056-a5aa2166dcfb-catalog-content\") pod \"redhat-marketplace-fczl5\" (UID: \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\") " pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.259352 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c16241b1-ffcb-4974-8056-a5aa2166dcfb-utilities\") pod \"redhat-marketplace-fczl5\" (UID: \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\") " pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.259475 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c16241b1-ffcb-4974-8056-a5aa2166dcfb-catalog-content\") pod \"redhat-marketplace-fczl5\" (UID: \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\") " pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.285269 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqxjw\" (UniqueName: \"kubernetes.io/projected/c16241b1-ffcb-4974-8056-a5aa2166dcfb-kube-api-access-rqxjw\") pod \"redhat-marketplace-fczl5\" (UID: \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\") " pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.385905 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.883364 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fczl5"] Dec 10 01:02:08 crc kubenswrapper[4884]: W1210 01:02:08.886806 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc16241b1_ffcb_4974_8056_a5aa2166dcfb.slice/crio-8876b55fa3691a9b3ac17d4716506891023b4ee01206c048bfcb768ed471cbad WatchSource:0}: Error finding container 8876b55fa3691a9b3ac17d4716506891023b4ee01206c048bfcb768ed471cbad: Status 404 returned error can't find the container with id 8876b55fa3691a9b3ac17d4716506891023b4ee01206c048bfcb768ed471cbad Dec 10 01:02:08 crc kubenswrapper[4884]: I1210 01:02:08.927993 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fczl5" event={"ID":"c16241b1-ffcb-4974-8056-a5aa2166dcfb","Type":"ContainerStarted","Data":"8876b55fa3691a9b3ac17d4716506891023b4ee01206c048bfcb768ed471cbad"} Dec 10 01:02:09 crc kubenswrapper[4884]: E1210 01:02:09.289476 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:02:09 crc kubenswrapper[4884]: I1210 01:02:09.941077 4884 generic.go:334] "Generic (PLEG): container finished" podID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" containerID="adf8ea0b7ecc50e10382b56ce13a0b90df46bbf98da8bf2001da9f3f18cb2d89" exitCode=0 Dec 10 01:02:09 crc kubenswrapper[4884]: I1210 01:02:09.941124 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fczl5" event={"ID":"c16241b1-ffcb-4974-8056-a5aa2166dcfb","Type":"ContainerDied","Data":"adf8ea0b7ecc50e10382b56ce13a0b90df46bbf98da8bf2001da9f3f18cb2d89"} Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.060847 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-g6tkp"] Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.063583 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.079837 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g6tkp"] Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.108735 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-catalog-content\") pod \"redhat-operators-g6tkp\" (UID: \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\") " pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.108857 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82dnv\" (UniqueName: \"kubernetes.io/projected/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-kube-api-access-82dnv\") pod \"redhat-operators-g6tkp\" (UID: \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\") " pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.108973 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-utilities\") pod \"redhat-operators-g6tkp\" (UID: \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\") " pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.211216 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-catalog-content\") pod \"redhat-operators-g6tkp\" (UID: \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\") " pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.211413 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82dnv\" (UniqueName: \"kubernetes.io/projected/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-kube-api-access-82dnv\") pod \"redhat-operators-g6tkp\" (UID: \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\") " pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.211791 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-utilities\") pod \"redhat-operators-g6tkp\" (UID: \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\") " pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.212172 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-catalog-content\") pod \"redhat-operators-g6tkp\" (UID: \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\") " pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.212728 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-utilities\") pod \"redhat-operators-g6tkp\" (UID: \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\") " pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.231274 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-82dnv\" (UniqueName: \"kubernetes.io/projected/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-kube-api-access-82dnv\") pod \"redhat-operators-g6tkp\" (UID: \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\") " pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.444846 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:10 crc kubenswrapper[4884]: W1210 01:02:10.940195 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad250c1e_6c6e_4233_8a65_3eb6fbf0482f.slice/crio-52fbfd7f79f090eb45450ae681dc7b5c0cf0d2c58645e8e7bb40f1c5d8950d85 WatchSource:0}: Error finding container 52fbfd7f79f090eb45450ae681dc7b5c0cf0d2c58645e8e7bb40f1c5d8950d85: Status 404 returned error can't find the container with id 52fbfd7f79f090eb45450ae681dc7b5c0cf0d2c58645e8e7bb40f1c5d8950d85 Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.942546 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-g6tkp"] Dec 10 01:02:10 crc kubenswrapper[4884]: I1210 01:02:10.954971 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g6tkp" event={"ID":"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f","Type":"ContainerStarted","Data":"52fbfd7f79f090eb45450ae681dc7b5c0cf0d2c58645e8e7bb40f1c5d8950d85"} Dec 10 01:02:11 crc kubenswrapper[4884]: I1210 01:02:11.968534 4884 generic.go:334] "Generic (PLEG): container finished" podID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" containerID="f7f018929260ba8f33f781d2448e5b35076955db14f316a8e2fb73009a72ec35" exitCode=0 Dec 10 01:02:11 crc kubenswrapper[4884]: I1210 01:02:11.968706 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g6tkp" event={"ID":"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f","Type":"ContainerDied","Data":"f7f018929260ba8f33f781d2448e5b35076955db14f316a8e2fb73009a72ec35"} Dec 10 01:02:11 crc kubenswrapper[4884]: I1210 01:02:11.971705 4884 generic.go:334] "Generic (PLEG): container finished" podID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" containerID="91a02830586f787a50f91d19b3d9f8c962c8a4ed4fd3175b880552f865efb406" exitCode=0 Dec 10 01:02:11 crc kubenswrapper[4884]: I1210 01:02:11.971742 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fczl5" event={"ID":"c16241b1-ffcb-4974-8056-a5aa2166dcfb","Type":"ContainerDied","Data":"91a02830586f787a50f91d19b3d9f8c962c8a4ed4fd3175b880552f865efb406"} Dec 10 01:02:13 crc kubenswrapper[4884]: I1210 01:02:13.992802 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g6tkp" event={"ID":"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f","Type":"ContainerStarted","Data":"184967a70e4fecfe849eff35e6c03b70b223ebb7b64f190cc93a61ed18271b00"} Dec 10 01:02:13 crc kubenswrapper[4884]: I1210 01:02:13.995183 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fczl5" event={"ID":"c16241b1-ffcb-4974-8056-a5aa2166dcfb","Type":"ContainerStarted","Data":"3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a"} Dec 10 01:02:14 crc kubenswrapper[4884]: I1210 01:02:14.032826 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fczl5" podStartSLOduration=3.145015842 podStartE2EDuration="6.032809044s" 
podCreationTimestamp="2025-12-10 01:02:08 +0000 UTC" firstStartedPulling="2025-12-10 01:02:09.943924353 +0000 UTC m=+1903.021881470" lastFinishedPulling="2025-12-10 01:02:12.831717555 +0000 UTC m=+1905.909674672" observedRunningTime="2025-12-10 01:02:14.025765812 +0000 UTC m=+1907.103722949" watchObservedRunningTime="2025-12-10 01:02:14.032809044 +0000 UTC m=+1907.110766161" Dec 10 01:02:14 crc kubenswrapper[4884]: E1210 01:02:14.288669 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:02:18 crc kubenswrapper[4884]: I1210 01:02:18.037158 4884 generic.go:334] "Generic (PLEG): container finished" podID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" containerID="184967a70e4fecfe849eff35e6c03b70b223ebb7b64f190cc93a61ed18271b00" exitCode=0 Dec 10 01:02:18 crc kubenswrapper[4884]: I1210 01:02:18.037300 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g6tkp" event={"ID":"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f","Type":"ContainerDied","Data":"184967a70e4fecfe849eff35e6c03b70b223ebb7b64f190cc93a61ed18271b00"} Dec 10 01:02:18 crc kubenswrapper[4884]: I1210 01:02:18.386175 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:18 crc kubenswrapper[4884]: I1210 01:02:18.386524 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:19 crc kubenswrapper[4884]: I1210 01:02:19.464005 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-fczl5" podUID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" containerName="registry-server" probeResult="failure" output=< Dec 10 01:02:19 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 01:02:19 crc kubenswrapper[4884]: > Dec 10 01:02:20 crc kubenswrapper[4884]: I1210 01:02:20.065679 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g6tkp" event={"ID":"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f","Type":"ContainerStarted","Data":"c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873"} Dec 10 01:02:20 crc kubenswrapper[4884]: I1210 01:02:20.090203 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-g6tkp" podStartSLOduration=3.335376381 podStartE2EDuration="10.090182532s" podCreationTimestamp="2025-12-10 01:02:10 +0000 UTC" firstStartedPulling="2025-12-10 01:02:11.971076728 +0000 UTC m=+1905.049033845" lastFinishedPulling="2025-12-10 01:02:18.725882879 +0000 UTC m=+1911.803839996" observedRunningTime="2025-12-10 01:02:20.086968855 +0000 UTC m=+1913.164925982" watchObservedRunningTime="2025-12-10 01:02:20.090182532 +0000 UTC m=+1913.168139649" Dec 10 01:02:20 crc kubenswrapper[4884]: E1210 01:02:20.426629 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:02:20 crc kubenswrapper[4884]: E1210 01:02:20.427158 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:02:20 crc kubenswrapper[4884]: E1210 01:02:20.427403 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 01:02:20 crc kubenswrapper[4884]: E1210 01:02:20.428747 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:02:20 crc kubenswrapper[4884]: I1210 01:02:20.445104 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:20 crc kubenswrapper[4884]: I1210 01:02:20.445413 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:21 crc kubenswrapper[4884]: I1210 01:02:21.524113 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-g6tkp" podUID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" containerName="registry-server" probeResult="failure" output=< Dec 10 01:02:21 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 01:02:21 crc kubenswrapper[4884]: > Dec 10 01:02:27 crc kubenswrapper[4884]: E1210 01:02:27.291919 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:02:28 crc kubenswrapper[4884]: I1210 01:02:28.461810 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:28 crc kubenswrapper[4884]: I1210 01:02:28.521651 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:28 crc kubenswrapper[4884]: I1210 01:02:28.711685 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fczl5"] Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.176776 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fczl5" podUID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" containerName="registry-server" containerID="cri-o://3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a" gracePeriod=2 Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.533383 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.614049 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.699612 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.814897 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqxjw\" (UniqueName: \"kubernetes.io/projected/c16241b1-ffcb-4974-8056-a5aa2166dcfb-kube-api-access-rqxjw\") pod \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\" (UID: \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\") " Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.815324 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c16241b1-ffcb-4974-8056-a5aa2166dcfb-catalog-content\") pod \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\" (UID: \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\") " Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.815375 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c16241b1-ffcb-4974-8056-a5aa2166dcfb-utilities\") pod \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\" (UID: \"c16241b1-ffcb-4974-8056-a5aa2166dcfb\") " Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.815982 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c16241b1-ffcb-4974-8056-a5aa2166dcfb-utilities" (OuterVolumeSpecName: "utilities") pod "c16241b1-ffcb-4974-8056-a5aa2166dcfb" (UID: "c16241b1-ffcb-4974-8056-a5aa2166dcfb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.825748 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c16241b1-ffcb-4974-8056-a5aa2166dcfb-kube-api-access-rqxjw" (OuterVolumeSpecName: "kube-api-access-rqxjw") pod "c16241b1-ffcb-4974-8056-a5aa2166dcfb" (UID: "c16241b1-ffcb-4974-8056-a5aa2166dcfb"). InnerVolumeSpecName "kube-api-access-rqxjw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.855206 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c16241b1-ffcb-4974-8056-a5aa2166dcfb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c16241b1-ffcb-4974-8056-a5aa2166dcfb" (UID: "c16241b1-ffcb-4974-8056-a5aa2166dcfb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.918317 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqxjw\" (UniqueName: \"kubernetes.io/projected/c16241b1-ffcb-4974-8056-a5aa2166dcfb-kube-api-access-rqxjw\") on node \"crc\" DevicePath \"\"" Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.918351 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c16241b1-ffcb-4974-8056-a5aa2166dcfb-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:02:30 crc kubenswrapper[4884]: I1210 01:02:30.918361 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c16241b1-ffcb-4974-8056-a5aa2166dcfb-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.196310 4884 generic.go:334] "Generic (PLEG): container finished" podID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" containerID="3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a" exitCode=0 Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.196508 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fczl5" Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.196584 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fczl5" event={"ID":"c16241b1-ffcb-4974-8056-a5aa2166dcfb","Type":"ContainerDied","Data":"3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a"} Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.196634 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fczl5" event={"ID":"c16241b1-ffcb-4974-8056-a5aa2166dcfb","Type":"ContainerDied","Data":"8876b55fa3691a9b3ac17d4716506891023b4ee01206c048bfcb768ed471cbad"} Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.196669 4884 scope.go:117] "RemoveContainer" containerID="3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a" Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.237500 4884 scope.go:117] "RemoveContainer" containerID="91a02830586f787a50f91d19b3d9f8c962c8a4ed4fd3175b880552f865efb406" Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.266279 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fczl5"] Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.280567 4884 scope.go:117] "RemoveContainer" containerID="adf8ea0b7ecc50e10382b56ce13a0b90df46bbf98da8bf2001da9f3f18cb2d89" Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.282404 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fczl5"] Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.334900 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" path="/var/lib/kubelet/pods/c16241b1-ffcb-4974-8056-a5aa2166dcfb/volumes" Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.349847 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g6tkp"] Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.354686 4884 scope.go:117] "RemoveContainer" containerID="3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a" Dec 10 01:02:31 crc kubenswrapper[4884]: E1210 01:02:31.355892 4884 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a\": container with ID starting with 3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a not found: ID does not exist" containerID="3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a" Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.355930 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a"} err="failed to get container status \"3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a\": rpc error: code = NotFound desc = could not find container \"3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a\": container with ID starting with 3902ab1e0345a525697eadc96a2d9297d854379b7a33dbfa084423894901cb3a not found: ID does not exist" Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.355953 4884 scope.go:117] "RemoveContainer" containerID="91a02830586f787a50f91d19b3d9f8c962c8a4ed4fd3175b880552f865efb406" Dec 10 01:02:31 crc kubenswrapper[4884]: E1210 01:02:31.356256 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91a02830586f787a50f91d19b3d9f8c962c8a4ed4fd3175b880552f865efb406\": container with ID starting with 91a02830586f787a50f91d19b3d9f8c962c8a4ed4fd3175b880552f865efb406 not found: ID does not exist" containerID="91a02830586f787a50f91d19b3d9f8c962c8a4ed4fd3175b880552f865efb406" Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.356396 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91a02830586f787a50f91d19b3d9f8c962c8a4ed4fd3175b880552f865efb406"} err="failed to get container status \"91a02830586f787a50f91d19b3d9f8c962c8a4ed4fd3175b880552f865efb406\": rpc error: code = NotFound desc = could not find container \"91a02830586f787a50f91d19b3d9f8c962c8a4ed4fd3175b880552f865efb406\": container with ID starting with 91a02830586f787a50f91d19b3d9f8c962c8a4ed4fd3175b880552f865efb406 not found: ID does not exist" Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.356539 4884 scope.go:117] "RemoveContainer" containerID="adf8ea0b7ecc50e10382b56ce13a0b90df46bbf98da8bf2001da9f3f18cb2d89" Dec 10 01:02:31 crc kubenswrapper[4884]: E1210 01:02:31.357048 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"adf8ea0b7ecc50e10382b56ce13a0b90df46bbf98da8bf2001da9f3f18cb2d89\": container with ID starting with adf8ea0b7ecc50e10382b56ce13a0b90df46bbf98da8bf2001da9f3f18cb2d89 not found: ID does not exist" containerID="adf8ea0b7ecc50e10382b56ce13a0b90df46bbf98da8bf2001da9f3f18cb2d89" Dec 10 01:02:31 crc kubenswrapper[4884]: I1210 01:02:31.357079 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"adf8ea0b7ecc50e10382b56ce13a0b90df46bbf98da8bf2001da9f3f18cb2d89"} err="failed to get container status \"adf8ea0b7ecc50e10382b56ce13a0b90df46bbf98da8bf2001da9f3f18cb2d89\": rpc error: code = NotFound desc = could not find container \"adf8ea0b7ecc50e10382b56ce13a0b90df46bbf98da8bf2001da9f3f18cb2d89\": container with ID starting with adf8ea0b7ecc50e10382b56ce13a0b90df46bbf98da8bf2001da9f3f18cb2d89 not found: ID does not exist" Dec 10 01:02:32 crc kubenswrapper[4884]: I1210 01:02:32.213608 4884 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openshift-marketplace/redhat-operators-g6tkp" podUID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" containerName="registry-server" containerID="cri-o://c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873" gracePeriod=2 Dec 10 01:02:32 crc kubenswrapper[4884]: I1210 01:02:32.782088 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:32 crc kubenswrapper[4884]: I1210 01:02:32.875922 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-utilities\") pod \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\" (UID: \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\") " Dec 10 01:02:32 crc kubenswrapper[4884]: I1210 01:02:32.876251 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-catalog-content\") pod \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\" (UID: \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\") " Dec 10 01:02:32 crc kubenswrapper[4884]: I1210 01:02:32.876279 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82dnv\" (UniqueName: \"kubernetes.io/projected/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-kube-api-access-82dnv\") pod \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\" (UID: \"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f\") " Dec 10 01:02:32 crc kubenswrapper[4884]: I1210 01:02:32.876633 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-utilities" (OuterVolumeSpecName: "utilities") pod "ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" (UID: "ad250c1e-6c6e-4233-8a65-3eb6fbf0482f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:02:32 crc kubenswrapper[4884]: I1210 01:02:32.876862 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:02:32 crc kubenswrapper[4884]: I1210 01:02:32.882683 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-kube-api-access-82dnv" (OuterVolumeSpecName: "kube-api-access-82dnv") pod "ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" (UID: "ad250c1e-6c6e-4233-8a65-3eb6fbf0482f"). InnerVolumeSpecName "kube-api-access-82dnv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:02:32 crc kubenswrapper[4884]: I1210 01:02:32.978666 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82dnv\" (UniqueName: \"kubernetes.io/projected/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-kube-api-access-82dnv\") on node \"crc\" DevicePath \"\"" Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.017834 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" (UID: "ad250c1e-6c6e-4233-8a65-3eb6fbf0482f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.081094 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.236767 4884 generic.go:334] "Generic (PLEG): container finished" podID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" containerID="c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873" exitCode=0 Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.236835 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g6tkp" event={"ID":"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f","Type":"ContainerDied","Data":"c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873"} Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.236861 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-g6tkp" Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.236899 4884 scope.go:117] "RemoveContainer" containerID="c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873" Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.236878 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-g6tkp" event={"ID":"ad250c1e-6c6e-4233-8a65-3eb6fbf0482f","Type":"ContainerDied","Data":"52fbfd7f79f090eb45450ae681dc7b5c0cf0d2c58645e8e7bb40f1c5d8950d85"} Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.278607 4884 scope.go:117] "RemoveContainer" containerID="184967a70e4fecfe849eff35e6c03b70b223ebb7b64f190cc93a61ed18271b00" Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.312231 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-g6tkp"] Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.313919 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-g6tkp"] Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.320648 4884 scope.go:117] "RemoveContainer" containerID="f7f018929260ba8f33f781d2448e5b35076955db14f316a8e2fb73009a72ec35" Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.385753 4884 scope.go:117] "RemoveContainer" containerID="c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873" Dec 10 01:02:33 crc kubenswrapper[4884]: E1210 01:02:33.386334 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873\": container with ID starting with c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873 not found: ID does not exist" containerID="c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873" Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.386400 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873"} err="failed to get container status \"c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873\": rpc error: code = NotFound desc = could not find container \"c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873\": container with ID starting with c297c853995e0113a28a906404570f26d37419618f92f7b0f150bb345a708873 not found: ID does not exist" Dec 10 01:02:33 crc 
kubenswrapper[4884]: I1210 01:02:33.386528 4884 scope.go:117] "RemoveContainer" containerID="184967a70e4fecfe849eff35e6c03b70b223ebb7b64f190cc93a61ed18271b00" Dec 10 01:02:33 crc kubenswrapper[4884]: E1210 01:02:33.386883 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"184967a70e4fecfe849eff35e6c03b70b223ebb7b64f190cc93a61ed18271b00\": container with ID starting with 184967a70e4fecfe849eff35e6c03b70b223ebb7b64f190cc93a61ed18271b00 not found: ID does not exist" containerID="184967a70e4fecfe849eff35e6c03b70b223ebb7b64f190cc93a61ed18271b00" Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.386926 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"184967a70e4fecfe849eff35e6c03b70b223ebb7b64f190cc93a61ed18271b00"} err="failed to get container status \"184967a70e4fecfe849eff35e6c03b70b223ebb7b64f190cc93a61ed18271b00\": rpc error: code = NotFound desc = could not find container \"184967a70e4fecfe849eff35e6c03b70b223ebb7b64f190cc93a61ed18271b00\": container with ID starting with 184967a70e4fecfe849eff35e6c03b70b223ebb7b64f190cc93a61ed18271b00 not found: ID does not exist" Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.386954 4884 scope.go:117] "RemoveContainer" containerID="f7f018929260ba8f33f781d2448e5b35076955db14f316a8e2fb73009a72ec35" Dec 10 01:02:33 crc kubenswrapper[4884]: E1210 01:02:33.387228 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7f018929260ba8f33f781d2448e5b35076955db14f316a8e2fb73009a72ec35\": container with ID starting with f7f018929260ba8f33f781d2448e5b35076955db14f316a8e2fb73009a72ec35 not found: ID does not exist" containerID="f7f018929260ba8f33f781d2448e5b35076955db14f316a8e2fb73009a72ec35" Dec 10 01:02:33 crc kubenswrapper[4884]: I1210 01:02:33.387267 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7f018929260ba8f33f781d2448e5b35076955db14f316a8e2fb73009a72ec35"} err="failed to get container status \"f7f018929260ba8f33f781d2448e5b35076955db14f316a8e2fb73009a72ec35\": rpc error: code = NotFound desc = could not find container \"f7f018929260ba8f33f781d2448e5b35076955db14f316a8e2fb73009a72ec35\": container with ID starting with f7f018929260ba8f33f781d2448e5b35076955db14f316a8e2fb73009a72ec35 not found: ID does not exist" Dec 10 01:02:35 crc kubenswrapper[4884]: E1210 01:02:35.319167 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:02:35 crc kubenswrapper[4884]: I1210 01:02:35.348141 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" path="/var/lib/kubelet/pods/ad250c1e-6c6e-4233-8a65-3eb6fbf0482f/volumes" Dec 10 01:02:38 crc kubenswrapper[4884]: I1210 01:02:38.713824 4884 scope.go:117] "RemoveContainer" containerID="ddd6ed187686dbe6e3b919b1253d8dca9746e67fda2228465e9360af596653c3" Dec 10 01:02:41 crc kubenswrapper[4884]: E1210 01:02:41.292814 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:02:49 crc kubenswrapper[4884]: E1210 01:02:49.290313 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:02:55 crc kubenswrapper[4884]: E1210 01:02:55.292224 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:03:01 crc kubenswrapper[4884]: E1210 01:03:01.289845 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:03:07 crc kubenswrapper[4884]: E1210 01:03:07.315467 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:03:16 crc kubenswrapper[4884]: E1210 01:03:16.291014 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:03:19 crc kubenswrapper[4884]: E1210 01:03:19.289856 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:03:29 crc kubenswrapper[4884]: E1210 01:03:29.293052 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:03:31 crc kubenswrapper[4884]: E1210 01:03:31.290968 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:03:43 crc kubenswrapper[4884]: E1210 01:03:43.290851 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:03:44 crc kubenswrapper[4884]: E1210 01:03:44.289788 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:03:48 crc kubenswrapper[4884]: I1210 01:03:48.098970 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:03:48 crc kubenswrapper[4884]: I1210 01:03:48.099835 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:03:57 crc kubenswrapper[4884]: E1210 01:03:57.302025 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:03:58 crc kubenswrapper[4884]: E1210 01:03:58.289746 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:04:11 crc kubenswrapper[4884]: E1210 01:04:11.290864 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:04:12 crc kubenswrapper[4884]: E1210 01:04:12.290283 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:04:18 crc kubenswrapper[4884]: I1210 01:04:18.098242 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:04:18 crc kubenswrapper[4884]: I1210 01:04:18.099043 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" 
podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:04:22 crc kubenswrapper[4884]: I1210 01:04:22.066328 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-pgk6x"] Dec 10 01:04:22 crc kubenswrapper[4884]: I1210 01:04:22.086531 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-07ac-account-create-update-vs7v2"] Dec 10 01:04:22 crc kubenswrapper[4884]: I1210 01:04:22.099786 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-db-create-pgk6x"] Dec 10 01:04:22 crc kubenswrapper[4884]: I1210 01:04:22.113188 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-07ac-account-create-update-vs7v2"] Dec 10 01:04:23 crc kubenswrapper[4884]: I1210 01:04:23.308714 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11394f81-f238-4929-8c40-4c279b8b39dc" path="/var/lib/kubelet/pods/11394f81-f238-4929-8c40-4c279b8b39dc/volumes" Dec 10 01:04:23 crc kubenswrapper[4884]: I1210 01:04:23.311486 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3" path="/var/lib/kubelet/pods/1dafc7ce-4def-4aeb-acdc-c41bdde7b5b3/volumes" Dec 10 01:04:24 crc kubenswrapper[4884]: I1210 01:04:24.044327 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-5a59-account-create-update-5pspx"] Dec 10 01:04:24 crc kubenswrapper[4884]: I1210 01:04:24.066501 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-sr25b"] Dec 10 01:04:24 crc kubenswrapper[4884]: I1210 01:04:24.078667 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-5a59-account-create-update-5pspx"] Dec 10 01:04:24 crc kubenswrapper[4884]: I1210 01:04:24.087295 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-sr25b"] Dec 10 01:04:24 crc kubenswrapper[4884]: E1210 01:04:24.289897 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:04:25 crc kubenswrapper[4884]: I1210 01:04:25.315228 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cadd997-aad1-45be-9922-6e2805437098" path="/var/lib/kubelet/pods/4cadd997-aad1-45be-9922-6e2805437098/volumes" Dec 10 01:04:25 crc kubenswrapper[4884]: I1210 01:04:25.317376 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="624c66a0-e094-4c3e-94f8-efe258817479" path="/var/lib/kubelet/pods/624c66a0-e094-4c3e-94f8-efe258817479/volumes" Dec 10 01:04:26 crc kubenswrapper[4884]: E1210 01:04:26.290468 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:04:29 crc kubenswrapper[4884]: I1210 01:04:29.052243 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/keystone-9058-account-create-update-sfww4"] Dec 10 01:04:29 crc kubenswrapper[4884]: I1210 01:04:29.066679 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-ad48-account-create-update-rdb85"] Dec 10 01:04:29 crc kubenswrapper[4884]: I1210 01:04:29.076152 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-x6mx8"] Dec 10 01:04:29 crc kubenswrapper[4884]: I1210 01:04:29.085125 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-sz9lb"] Dec 10 01:04:29 crc kubenswrapper[4884]: I1210 01:04:29.094919 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-ad48-account-create-update-rdb85"] Dec 10 01:04:29 crc kubenswrapper[4884]: I1210 01:04:29.105046 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-x6mx8"] Dec 10 01:04:29 crc kubenswrapper[4884]: I1210 01:04:29.115133 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-sz9lb"] Dec 10 01:04:29 crc kubenswrapper[4884]: I1210 01:04:29.129193 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-9058-account-create-update-sfww4"] Dec 10 01:04:29 crc kubenswrapper[4884]: I1210 01:04:29.307277 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b6ae20d-aec5-445c-82be-75954a52176b" path="/var/lib/kubelet/pods/4b6ae20d-aec5-445c-82be-75954a52176b/volumes" Dec 10 01:04:29 crc kubenswrapper[4884]: I1210 01:04:29.308450 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6df655e4-396f-4fc7-8b31-ae3ee55134f2" path="/var/lib/kubelet/pods/6df655e4-396f-4fc7-8b31-ae3ee55134f2/volumes" Dec 10 01:04:29 crc kubenswrapper[4884]: I1210 01:04:29.309163 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f111e4f-54f4-4964-8ccf-d6336884eeec" path="/var/lib/kubelet/pods/7f111e4f-54f4-4964-8ccf-d6336884eeec/volumes" Dec 10 01:04:29 crc kubenswrapper[4884]: I1210 01:04:29.309718 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6e587fb-bea6-4152-8dbe-b8aa7c6e203a" path="/var/lib/kubelet/pods/b6e587fb-bea6-4152-8dbe-b8aa7c6e203a/volumes" Dec 10 01:04:31 crc kubenswrapper[4884]: I1210 01:04:31.047988 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-7751-account-create-update-2bn8l"] Dec 10 01:04:31 crc kubenswrapper[4884]: I1210 01:04:31.070293 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5"] Dec 10 01:04:31 crc kubenswrapper[4884]: I1210 01:04:31.083038 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-openstack-cell1-db-create-jhkd5"] Dec 10 01:04:31 crc kubenswrapper[4884]: I1210 01:04:31.091915 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mysqld-exporter-7751-account-create-update-2bn8l"] Dec 10 01:04:31 crc kubenswrapper[4884]: I1210 01:04:31.302699 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b01476d4-2f54-49f9-a292-f4c39fcc215b" path="/var/lib/kubelet/pods/b01476d4-2f54-49f9-a292-f4c39fcc215b/volumes" Dec 10 01:04:31 crc kubenswrapper[4884]: I1210 01:04:31.304275 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9f9f092-f6ef-4316-bc6a-71bdee41cec3" path="/var/lib/kubelet/pods/b9f9f092-f6ef-4316-bc6a-71bdee41cec3/volumes" Dec 10 01:04:36 crc kubenswrapper[4884]: E1210 01:04:36.291062 4884 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:04:36 crc kubenswrapper[4884]: I1210 01:04:36.966233 4884 generic.go:334] "Generic (PLEG): container finished" podID="1a04a509-b2c8-4fd6-8443-24aa317b7eb8" containerID="06907428237f5a80f36c1cfd2e94f12e614023d8ab86935144889002f058ceaa" exitCode=0 Dec 10 01:04:36 crc kubenswrapper[4884]: I1210 01:04:36.966286 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" event={"ID":"1a04a509-b2c8-4fd6-8443-24aa317b7eb8","Type":"ContainerDied","Data":"06907428237f5a80f36c1cfd2e94f12e614023d8ab86935144889002f058ceaa"} Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.460321 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.583941 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-inventory\") pod \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.584055 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pxcg\" (UniqueName: \"kubernetes.io/projected/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-kube-api-access-7pxcg\") pod \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.584130 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-ssh-key\") pod \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.584219 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-bootstrap-combined-ca-bundle\") pod \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\" (UID: \"1a04a509-b2c8-4fd6-8443-24aa317b7eb8\") " Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.590669 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "1a04a509-b2c8-4fd6-8443-24aa317b7eb8" (UID: "1a04a509-b2c8-4fd6-8443-24aa317b7eb8"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.592253 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-kube-api-access-7pxcg" (OuterVolumeSpecName: "kube-api-access-7pxcg") pod "1a04a509-b2c8-4fd6-8443-24aa317b7eb8" (UID: "1a04a509-b2c8-4fd6-8443-24aa317b7eb8"). InnerVolumeSpecName "kube-api-access-7pxcg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.619000 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1a04a509-b2c8-4fd6-8443-24aa317b7eb8" (UID: "1a04a509-b2c8-4fd6-8443-24aa317b7eb8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.624682 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-inventory" (OuterVolumeSpecName: "inventory") pod "1a04a509-b2c8-4fd6-8443-24aa317b7eb8" (UID: "1a04a509-b2c8-4fd6-8443-24aa317b7eb8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.687213 4884 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.687249 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.687263 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pxcg\" (UniqueName: \"kubernetes.io/projected/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-kube-api-access-7pxcg\") on node \"crc\" DevicePath \"\"" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.687274 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1a04a509-b2c8-4fd6-8443-24aa317b7eb8-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.863959 4884 scope.go:117] "RemoveContainer" containerID="8d7cf833cb3bedf29d290fc40d21ba80e92a975043e5618c95c9a644ee88a057" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.908750 4884 scope.go:117] "RemoveContainer" containerID="69421897fee06d879786d40a3e2b8304efae29600cc308dbed8b68d72c7a9247" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.990416 4884 scope.go:117] "RemoveContainer" containerID="c326ce590d92f55184399382e5bc4bef037cb66663e9febd88f0b5c6f2cfbc20" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.996284 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" event={"ID":"1a04a509-b2c8-4fd6-8443-24aa317b7eb8","Type":"ContainerDied","Data":"f0186d31d7aa4c0bc651088601688cde1a0fe4f1b3ab7c26135e5c9d4b1de61a"} Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.996326 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0186d31d7aa4c0bc651088601688cde1a0fe4f1b3ab7c26135e5c9d4b1de61a" Dec 10 01:04:38 crc kubenswrapper[4884]: I1210 01:04:38.996386 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.029008 4884 scope.go:117] "RemoveContainer" containerID="c0fcd1f9d043c430ad2cddffe32e287ca338c67eac269cc90e1d7eae365f13aa" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.073422 4884 scope.go:117] "RemoveContainer" containerID="d35b36d9ab6c1d8fb5e9c440a0e5d7303c2236ea19b4b2151ee72d4286279707" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.099801 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx"] Dec 10 01:04:39 crc kubenswrapper[4884]: E1210 01:04:39.100275 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a04a509-b2c8-4fd6-8443-24aa317b7eb8" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.100299 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a04a509-b2c8-4fd6-8443-24aa317b7eb8" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 10 01:04:39 crc kubenswrapper[4884]: E1210 01:04:39.100329 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" containerName="extract-utilities" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.100340 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" containerName="extract-utilities" Dec 10 01:04:39 crc kubenswrapper[4884]: E1210 01:04:39.100352 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" containerName="registry-server" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.100360 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" containerName="registry-server" Dec 10 01:04:39 crc kubenswrapper[4884]: E1210 01:04:39.100464 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" containerName="registry-server" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.100475 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" containerName="registry-server" Dec 10 01:04:39 crc kubenswrapper[4884]: E1210 01:04:39.100497 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" containerName="extract-utilities" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.100505 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" containerName="extract-utilities" Dec 10 01:04:39 crc kubenswrapper[4884]: E1210 01:04:39.100520 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" containerName="extract-content" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.100530 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" containerName="extract-content" Dec 10 01:04:39 crc kubenswrapper[4884]: E1210 01:04:39.100556 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" containerName="extract-content" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.100565 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" containerName="extract-content" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.100811 
4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a04a509-b2c8-4fd6-8443-24aa317b7eb8" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.100858 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad250c1e-6c6e-4233-8a65-3eb6fbf0482f" containerName="registry-server" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.100874 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="c16241b1-ffcb-4974-8056-a5aa2166dcfb" containerName="registry-server" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.101768 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.104775 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.104961 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.105329 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.105689 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.108415 4884 scope.go:117] "RemoveContainer" containerID="a0bae0e726bc3425819cfdb9bf38ef16c16cf8c22649d5a42a8934d76ad04c59" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.120135 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx"] Dec 10 01:04:39 crc kubenswrapper[4884]: E1210 01:04:39.129803 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a04a509_b2c8_4fd6_8443_24aa317b7eb8.slice\": RecentStats: unable to find data in memory cache]" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.131710 4884 scope.go:117] "RemoveContainer" containerID="a79500e8ce470f15597df7aa0276a4db505c17ec9898e30349889d091c57be6c" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.151238 4884 scope.go:117] "RemoveContainer" containerID="210db8a95b30465c4c9dc64069918bef6de762b64ebf7ecea2a087736e9ec629" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.181913 4884 scope.go:117] "RemoveContainer" containerID="3a56a6c7e79612294e55ff1ee533ffe52a235f17150ceaa33151ecf19c81914a" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.202591 4884 scope.go:117] "RemoveContainer" containerID="4b196f78d47d05462e74d7d0a4712868a11d6964660de3b9d97da0a7c8e3aefb" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.222203 4884 scope.go:117] "RemoveContainer" containerID="9319b0156366d80f8fd9d46684501f4be1f30bef0ec555307a572178115e10fb" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.248025 4884 scope.go:117] "RemoveContainer" containerID="81d90abbac07409cefd4c4887221da72b4c59103116572fb6fd34353d62b7d64" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.299399 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/e732d264-88f6-46db-9eff-c7fb0b13e791-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx\" (UID: \"e732d264-88f6-46db-9eff-c7fb0b13e791\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.299617 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jz5d8\" (UniqueName: \"kubernetes.io/projected/e732d264-88f6-46db-9eff-c7fb0b13e791-kube-api-access-jz5d8\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx\" (UID: \"e732d264-88f6-46db-9eff-c7fb0b13e791\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.299867 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e732d264-88f6-46db-9eff-c7fb0b13e791-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx\" (UID: \"e732d264-88f6-46db-9eff-c7fb0b13e791\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.402418 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e732d264-88f6-46db-9eff-c7fb0b13e791-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx\" (UID: \"e732d264-88f6-46db-9eff-c7fb0b13e791\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.402770 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jz5d8\" (UniqueName: \"kubernetes.io/projected/e732d264-88f6-46db-9eff-c7fb0b13e791-kube-api-access-jz5d8\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx\" (UID: \"e732d264-88f6-46db-9eff-c7fb0b13e791\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.402836 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e732d264-88f6-46db-9eff-c7fb0b13e791-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx\" (UID: \"e732d264-88f6-46db-9eff-c7fb0b13e791\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.408237 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e732d264-88f6-46db-9eff-c7fb0b13e791-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx\" (UID: \"e732d264-88f6-46db-9eff-c7fb0b13e791\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.410788 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e732d264-88f6-46db-9eff-c7fb0b13e791-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx\" (UID: \"e732d264-88f6-46db-9eff-c7fb0b13e791\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.418977 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-jz5d8\" (UniqueName: \"kubernetes.io/projected/e732d264-88f6-46db-9eff-c7fb0b13e791-kube-api-access-jz5d8\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx\" (UID: \"e732d264-88f6-46db-9eff-c7fb0b13e791\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:04:39 crc kubenswrapper[4884]: I1210 01:04:39.424918 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:04:40 crc kubenswrapper[4884]: I1210 01:04:40.153601 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx"] Dec 10 01:04:40 crc kubenswrapper[4884]: I1210 01:04:40.164591 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 01:04:41 crc kubenswrapper[4884]: I1210 01:04:41.024416 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" event={"ID":"e732d264-88f6-46db-9eff-c7fb0b13e791","Type":"ContainerStarted","Data":"fb6e7d072ea69b46dc52a8ca8ab64ab0fb12a71c8cc3c3dfe647033f2ccf16cd"} Dec 10 01:04:41 crc kubenswrapper[4884]: E1210 01:04:41.289839 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:04:42 crc kubenswrapper[4884]: I1210 01:04:42.041387 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" event={"ID":"e732d264-88f6-46db-9eff-c7fb0b13e791","Type":"ContainerStarted","Data":"fe37b24fc0fcb63f9e95efce9bf2ae9a51f9937fd71dcb245a9ee6d329d2ef0a"} Dec 10 01:04:42 crc kubenswrapper[4884]: I1210 01:04:42.073879 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" podStartSLOduration=2.313952218 podStartE2EDuration="3.073851726s" podCreationTimestamp="2025-12-10 01:04:39 +0000 UTC" firstStartedPulling="2025-12-10 01:04:40.164339648 +0000 UTC m=+2053.242296765" lastFinishedPulling="2025-12-10 01:04:40.924239116 +0000 UTC m=+2054.002196273" observedRunningTime="2025-12-10 01:04:42.062808266 +0000 UTC m=+2055.140765443" watchObservedRunningTime="2025-12-10 01:04:42.073851726 +0000 UTC m=+2055.151808873" Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.070474 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-xwwl8"] Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.094113 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-4150-account-create-update-pfkjg"] Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.098549 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.098622 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" 
podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.098685 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.100033 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5e4d50ba947a172672504a4b76d8d8687fb7bd5358cadb2fdb3ee9a59857b81c"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.100188 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://5e4d50ba947a172672504a4b76d8d8687fb7bd5358cadb2fdb3ee9a59857b81c" gracePeriod=600 Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.103459 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-pm6qf"] Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.112347 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-0b32-account-create-update-lb248"] Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.122655 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-4150-account-create-update-pfkjg"] Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.131698 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-pm6qf"] Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.140713 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-xwwl8"] Dec 10 01:04:48 crc kubenswrapper[4884]: I1210 01:04:48.149196 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-0b32-account-create-update-lb248"] Dec 10 01:04:49 crc kubenswrapper[4884]: I1210 01:04:49.146458 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="5e4d50ba947a172672504a4b76d8d8687fb7bd5358cadb2fdb3ee9a59857b81c" exitCode=0 Dec 10 01:04:49 crc kubenswrapper[4884]: I1210 01:04:49.146580 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"5e4d50ba947a172672504a4b76d8d8687fb7bd5358cadb2fdb3ee9a59857b81c"} Dec 10 01:04:49 crc kubenswrapper[4884]: I1210 01:04:49.146911 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc"} Dec 10 01:04:49 crc kubenswrapper[4884]: I1210 01:04:49.146944 4884 scope.go:117] "RemoveContainer" containerID="794c17030931ad1ff4d9fa656c09c1efb16d3dd2127cd57c4b4403b8b3df8957" Dec 10 01:04:49 crc kubenswrapper[4884]: I1210 01:04:49.314286 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43" 
path="/var/lib/kubelet/pods/4bf7043d-5b2e-4b7d-b93f-fe6ae4585f43/volumes" Dec 10 01:04:49 crc kubenswrapper[4884]: I1210 01:04:49.315909 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67087c97-ae2d-400e-b3a4-455c5eb8f082" path="/var/lib/kubelet/pods/67087c97-ae2d-400e-b3a4-455c5eb8f082/volumes" Dec 10 01:04:49 crc kubenswrapper[4884]: I1210 01:04:49.317399 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ee033e3-bd08-4fe7-8efb-d6b81e79796c" path="/var/lib/kubelet/pods/9ee033e3-bd08-4fe7-8efb-d6b81e79796c/volumes" Dec 10 01:04:49 crc kubenswrapper[4884]: I1210 01:04:49.319567 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5b54dd3-a06f-4c77-88d4-88417c5d43eb" path="/var/lib/kubelet/pods/f5b54dd3-a06f-4c77-88d4-88417c5d43eb/volumes" Dec 10 01:04:50 crc kubenswrapper[4884]: E1210 01:04:50.426281 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:04:50 crc kubenswrapper[4884]: E1210 01:04:50.427017 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:04:50 crc kubenswrapper[4884]: E1210 01:04:50.427210 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d 
db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:04:50 crc kubenswrapper[4884]: E1210 01:04:50.428565 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:04:53 crc kubenswrapper[4884]: I1210 01:04:53.062497 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-8xwrt"] Dec 10 01:04:53 crc kubenswrapper[4884]: I1210 01:04:53.078360 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7788-account-create-update-zwq2d"] Dec 10 01:04:53 crc kubenswrapper[4884]: I1210 01:04:53.092487 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-rx8lw"] Dec 10 01:04:53 crc kubenswrapper[4884]: I1210 01:04:53.102171 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-6db2-account-create-update-m9tlk"] Dec 10 01:04:53 crc kubenswrapper[4884]: I1210 01:04:53.111115 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-8xwrt"] Dec 10 01:04:53 crc kubenswrapper[4884]: I1210 01:04:53.119891 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7788-account-create-update-zwq2d"] Dec 10 01:04:53 crc kubenswrapper[4884]: I1210 01:04:53.130450 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-rx8lw"] Dec 10 01:04:53 crc kubenswrapper[4884]: I1210 01:04:53.139217 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-6db2-account-create-update-m9tlk"] Dec 10 01:04:53 crc kubenswrapper[4884]: I1210 01:04:53.304944 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e1759e5-b6a9-4815-b344-bc94fb621f14" path="/var/lib/kubelet/pods/2e1759e5-b6a9-4815-b344-bc94fb621f14/volumes" Dec 10 01:04:53 crc kubenswrapper[4884]: I1210 01:04:53.306337 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8af1f893-43a8-4ab0-8b44-4f2dd24340b0" path="/var/lib/kubelet/pods/8af1f893-43a8-4ab0-8b44-4f2dd24340b0/volumes" Dec 10 01:04:53 crc kubenswrapper[4884]: I1210 01:04:53.308222 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0386097-ea50-4085-a9f0-7fcde7163b25" path="/var/lib/kubelet/pods/b0386097-ea50-4085-a9f0-7fcde7163b25/volumes" Dec 10 01:04:53 crc kubenswrapper[4884]: I1210 01:04:53.309591 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2c2f4f5-5345-4503-be32-860ebcf42d21" path="/var/lib/kubelet/pods/c2c2f4f5-5345-4503-be32-860ebcf42d21/volumes" Dec 10 01:04:56 crc kubenswrapper[4884]: E1210 01:04:56.290685 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:04:57 crc kubenswrapper[4884]: I1210 01:04:57.054515 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-jczhh"] Dec 10 01:04:57 crc kubenswrapper[4884]: I1210 01:04:57.074909 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-jczhh"] Dec 10 01:04:57 crc kubenswrapper[4884]: I1210 01:04:57.312293 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="290cb163-68fb-48f9-a3d1-695333c2499b" path="/var/lib/kubelet/pods/290cb163-68fb-48f9-a3d1-695333c2499b/volumes" Dec 10 01:05:04 crc kubenswrapper[4884]: E1210 01:05:04.291285 4884 pod_workers.go:1301] "Error syncing pod, skipping" 
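Both stuck pods reference the current-tested tag, and the registry's answer ("Tag current-tested was deleted or has expired. To pull, revive via time machine") is Quay's tag-expiration message: the tag no longer resolves, so every retry will keep failing until the tag is restored or the pods are pointed at a live reference. One way to confirm this from outside the cluster is to request the manifest over the standard registry v2 API; a Go sketch assuming anonymous access (a repository requiring auth would answer 401 instead of 404):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Registry v2 manifest endpoint for the tag the kubelet is trying to pull.
	url := "https://quay.rdoproject.org/v2/podified-master-centos10/openstack-heat-engine/manifests/current-tested"
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Accept", "application/vnd.docker.distribution.manifest.v2+json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	// A 404 here corresponds to the "Tag ... was deleted or has expired" pull error.
	fmt.Println("manifest status:", resp.Status)
}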
err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:05:10 crc kubenswrapper[4884]: E1210 01:05:10.420912 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:05:10 crc kubenswrapper[4884]: E1210 01:05:10.421345 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:05:10 crc kubenswrapper[4884]: E1210 01:05:10.421478 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:05:10 crc kubenswrapper[4884]: E1210 01:05:10.422954 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:05:15 crc kubenswrapper[4884]: E1210 01:05:15.290002 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:05:23 crc kubenswrapper[4884]: E1210 01:05:23.290944 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:05:27 crc kubenswrapper[4884]: I1210 01:05:27.086172 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-cljgw"] Dec 10 01:05:27 crc kubenswrapper[4884]: I1210 01:05:27.095450 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-bd4dd"] Dec 10 01:05:27 crc kubenswrapper[4884]: I1210 01:05:27.108000 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-cljgw"] Dec 10 01:05:27 crc kubenswrapper[4884]: I1210 01:05:27.117143 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-bd4dd"] Dec 10 01:05:27 crc kubenswrapper[4884]: I1210 01:05:27.310362 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="397f040a-a433-4049-8b8e-fcd35c003b15" path="/var/lib/kubelet/pods/397f040a-a433-4049-8b8e-fcd35c003b15/volumes" Dec 10 01:05:27 crc 
kubenswrapper[4884]: I1210 01:05:27.311194 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db50dd06-d67c-468e-88de-6a8fb86bd1bd" path="/var/lib/kubelet/pods/db50dd06-d67c-468e-88de-6a8fb86bd1bd/volumes" Dec 10 01:05:29 crc kubenswrapper[4884]: E1210 01:05:29.289676 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:05:34 crc kubenswrapper[4884]: E1210 01:05:34.301454 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:05:35 crc kubenswrapper[4884]: I1210 01:05:35.055366 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-hr4wb"] Dec 10 01:05:35 crc kubenswrapper[4884]: I1210 01:05:35.066211 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-hr4wb"] Dec 10 01:05:35 crc kubenswrapper[4884]: I1210 01:05:35.299334 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6943c1e6-d468-45b6-a2f7-831a6961e64a" path="/var/lib/kubelet/pods/6943c1e6-d468-45b6-a2f7-831a6961e64a/volumes" Dec 10 01:05:39 crc kubenswrapper[4884]: I1210 01:05:39.552448 4884 scope.go:117] "RemoveContainer" containerID="7fa3908321a1b04531697e7e1891c7c5ca5d267f890a92f0be0cdb5c708cd201" Dec 10 01:05:39 crc kubenswrapper[4884]: I1210 01:05:39.606163 4884 scope.go:117] "RemoveContainer" containerID="fca5fc589fbc2be3381c5c140f587adc8b9fb7a3c0a6f44d700fc828d97a39bc" Dec 10 01:05:39 crc kubenswrapper[4884]: I1210 01:05:39.675761 4884 scope.go:117] "RemoveContainer" containerID="fecadb8cf74ec042d376f671ca89fefddb08b082c7faa83c14319116ddff7839" Dec 10 01:05:39 crc kubenswrapper[4884]: I1210 01:05:39.712381 4884 scope.go:117] "RemoveContainer" containerID="09701bd735e5ab5cb4809ac7a4f35b50a301d0acf567f2f46ba54eaa6943d1a3" Dec 10 01:05:39 crc kubenswrapper[4884]: I1210 01:05:39.791928 4884 scope.go:117] "RemoveContainer" containerID="8488ed6846b9ccd6b1a82f369c7730460c2865ee0ca20b9303defb047555336b" Dec 10 01:05:39 crc kubenswrapper[4884]: I1210 01:05:39.827173 4884 scope.go:117] "RemoveContainer" containerID="506fc9ae052bbb238d93afe313442339b94b0d4e58ec3e81358996a6eb179396" Dec 10 01:05:39 crc kubenswrapper[4884]: I1210 01:05:39.878493 4884 scope.go:117] "RemoveContainer" containerID="6c7978f28cd94d2d6706df19963d9d6859d1bf52b2cad035e9b400dcdc3c853f" Dec 10 01:05:39 crc kubenswrapper[4884]: I1210 01:05:39.911169 4884 scope.go:117] "RemoveContainer" containerID="d7ee0373f33acbeee34548dbd59122b7458043112c5bd39c58dbc2638fc893a0" Dec 10 01:05:39 crc kubenswrapper[4884]: I1210 01:05:39.935477 4884 scope.go:117] "RemoveContainer" containerID="2c724810b32bcf2a1149726dff4e07751a96cb1a476b47900a738c030fa5eeb1" Dec 10 01:05:39 crc kubenswrapper[4884]: I1210 01:05:39.960722 4884 scope.go:117] "RemoveContainer" containerID="afd5fed80eeac90680b8eeb4e52d5c23f1108fce92b8d26143606b98ef33a5fd" Dec 10 01:05:39 crc kubenswrapper[4884]: I1210 01:05:39.988512 4884 scope.go:117] "RemoveContainer" 
containerID="ab1133ce19b4de1af4bbcf288ae900fd93816d8a9cded3eda5f3a35f3c086418" Dec 10 01:05:40 crc kubenswrapper[4884]: I1210 01:05:40.024402 4884 scope.go:117] "RemoveContainer" containerID="c937161b5d60dc9246e70fbc3f926ace9c34dd782303419af234f313a8bce6b8" Dec 10 01:05:42 crc kubenswrapper[4884]: I1210 01:05:42.044563 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-w7clk"] Dec 10 01:05:42 crc kubenswrapper[4884]: I1210 01:05:42.056737 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-w7clk"] Dec 10 01:05:43 crc kubenswrapper[4884]: I1210 01:05:43.308663 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b7ea171-752a-4d11-b544-d5c69f602dd0" path="/var/lib/kubelet/pods/0b7ea171-752a-4d11-b544-d5c69f602dd0/volumes" Dec 10 01:05:44 crc kubenswrapper[4884]: E1210 01:05:44.290487 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:05:46 crc kubenswrapper[4884]: E1210 01:05:46.290665 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:05:49 crc kubenswrapper[4884]: I1210 01:05:49.024918 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-fkf86"] Dec 10 01:05:49 crc kubenswrapper[4884]: I1210 01:05:49.032677 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-fkf86"] Dec 10 01:05:49 crc kubenswrapper[4884]: I1210 01:05:49.298833 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da3a231b-de19-4217-8b1b-54d40e56f0c3" path="/var/lib/kubelet/pods/da3a231b-de19-4217-8b1b-54d40e56f0c3/volumes" Dec 10 01:05:50 crc kubenswrapper[4884]: I1210 01:05:50.043935 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-brxjz"] Dec 10 01:05:50 crc kubenswrapper[4884]: I1210 01:05:50.057155 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-brxjz"] Dec 10 01:05:51 crc kubenswrapper[4884]: I1210 01:05:51.297818 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5aeda9c4-9011-47b9-8083-f0309ed8a010" path="/var/lib/kubelet/pods/5aeda9c4-9011-47b9-8083-f0309ed8a010/volumes" Dec 10 01:05:56 crc kubenswrapper[4884]: E1210 01:05:56.290940 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:06:00 crc kubenswrapper[4884]: E1210 01:06:00.291164 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:06:07 crc kubenswrapper[4884]: E1210 01:06:07.297769 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:06:13 crc kubenswrapper[4884]: E1210 01:06:13.290600 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:06:20 crc kubenswrapper[4884]: E1210 01:06:20.291946 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:06:27 crc kubenswrapper[4884]: E1210 01:06:27.299255 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:06:32 crc kubenswrapper[4884]: E1210 01:06:32.290604 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:06:33 crc kubenswrapper[4884]: I1210 01:06:33.057615 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-321f-account-create-update-sgsmd"] Dec 10 01:06:33 crc kubenswrapper[4884]: I1210 01:06:33.065089 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-321f-account-create-update-sgsmd"] Dec 10 01:06:33 crc kubenswrapper[4884]: I1210 01:06:33.301378 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="253e3b48-23a7-4433-9f12-ae9901a3260d" path="/var/lib/kubelet/pods/253e3b48-23a7-4433-9f12-ae9901a3260d/volumes" Dec 10 01:06:34 crc kubenswrapper[4884]: I1210 01:06:34.037067 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-mgp4f"] Dec 10 01:06:34 crc kubenswrapper[4884]: I1210 01:06:34.048488 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-l2wlj"] Dec 10 01:06:34 crc kubenswrapper[4884]: I1210 01:06:34.079727 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-mgp4f"] Dec 10 01:06:34 crc kubenswrapper[4884]: I1210 01:06:34.102040 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-l2wlj"] Dec 10 01:06:35 crc kubenswrapper[4884]: I1210 01:06:35.036548 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-f64f-account-create-update-qtqz4"] Dec 10 01:06:35 crc kubenswrapper[4884]: I1210 01:06:35.048902 4884 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-c589-account-create-update-c99cj"] Dec 10 01:06:35 crc kubenswrapper[4884]: I1210 01:06:35.061071 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-dbzrq"] Dec 10 01:06:35 crc kubenswrapper[4884]: I1210 01:06:35.071865 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-f64f-account-create-update-qtqz4"] Dec 10 01:06:35 crc kubenswrapper[4884]: I1210 01:06:35.079084 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-c589-account-create-update-c99cj"] Dec 10 01:06:35 crc kubenswrapper[4884]: I1210 01:06:35.086457 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-dbzrq"] Dec 10 01:06:35 crc kubenswrapper[4884]: I1210 01:06:35.303664 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16359c7a-73d8-4c9b-bb45-2c81c5475330" path="/var/lib/kubelet/pods/16359c7a-73d8-4c9b-bb45-2c81c5475330/volumes" Dec 10 01:06:35 crc kubenswrapper[4884]: I1210 01:06:35.304213 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a5d45a0-51a8-4925-bbba-2b9d42ac9114" path="/var/lib/kubelet/pods/4a5d45a0-51a8-4925-bbba-2b9d42ac9114/volumes" Dec 10 01:06:35 crc kubenswrapper[4884]: I1210 01:06:35.304766 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="589b62f4-a91b-43b0-b49b-b303dcba8a67" path="/var/lib/kubelet/pods/589b62f4-a91b-43b0-b49b-b303dcba8a67/volumes" Dec 10 01:06:35 crc kubenswrapper[4884]: I1210 01:06:35.305273 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="636dddde-3987-4843-917d-956aedd66a22" path="/var/lib/kubelet/pods/636dddde-3987-4843-917d-956aedd66a22/volumes" Dec 10 01:06:35 crc kubenswrapper[4884]: I1210 01:06:35.306533 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbf4c134-f53b-4613-bc36-92d89f55f8be" path="/var/lib/kubelet/pods/bbf4c134-f53b-4613-bc36-92d89f55f8be/volumes" Dec 10 01:06:40 crc kubenswrapper[4884]: I1210 01:06:40.366178 4884 scope.go:117] "RemoveContainer" containerID="5a18b496c72d632255aaa36434d4eea0fa18107f47c02e03dd150602063028eb" Dec 10 01:06:40 crc kubenswrapper[4884]: I1210 01:06:40.399424 4884 scope.go:117] "RemoveContainer" containerID="4baacdec18785b8a5f5b8f64d33567ba2c1ff2bef77bb3126d0206aee00e2f7c" Dec 10 01:06:40 crc kubenswrapper[4884]: I1210 01:06:40.490589 4884 scope.go:117] "RemoveContainer" containerID="f43e9d15724a5efbdbefd705bb1c4b951ac9e1badc4cfdfb817b0021ecb2bf27" Dec 10 01:06:40 crc kubenswrapper[4884]: I1210 01:06:40.563929 4884 scope.go:117] "RemoveContainer" containerID="815e6501db560851dd335dc8262103f42f8bd1676a002e7d0597ca529220a1a3" Dec 10 01:06:40 crc kubenswrapper[4884]: I1210 01:06:40.615494 4884 scope.go:117] "RemoveContainer" containerID="be45d21ccc0b4620e7e950ab8d945e211dca47942735dc09cecd61ad45e57e4f" Dec 10 01:06:40 crc kubenswrapper[4884]: I1210 01:06:40.648905 4884 scope.go:117] "RemoveContainer" containerID="6e9e119f5965300ccc87f06e72653e87bd5b2f5f70c17279cb22ac8c531221d6" Dec 10 01:06:40 crc kubenswrapper[4884]: I1210 01:06:40.702511 4884 scope.go:117] "RemoveContainer" containerID="344d6683a1b9acb15fe9610d22ee9a38f0a4b65a8891b1193b1ae74d3fd3c617" Dec 10 01:06:40 crc kubenswrapper[4884]: I1210 01:06:40.745772 4884 scope.go:117] "RemoveContainer" containerID="151889c10087002c761da4afeefc56fafbf5a2b7e1f1449d485d869860e106a4" Dec 10 01:06:40 crc kubenswrapper[4884]: I1210 01:06:40.787636 4884 scope.go:117] 
"RemoveContainer" containerID="d8146157788d7ae948e46a96497a7398e021c95dd7d40fe13f4c5126c166fdfd" Dec 10 01:06:42 crc kubenswrapper[4884]: E1210 01:06:42.290301 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:06:44 crc kubenswrapper[4884]: E1210 01:06:44.290713 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:06:48 crc kubenswrapper[4884]: I1210 01:06:48.098483 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:06:48 crc kubenswrapper[4884]: I1210 01:06:48.099397 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:06:57 crc kubenswrapper[4884]: E1210 01:06:57.299751 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:06:58 crc kubenswrapper[4884]: E1210 01:06:58.288779 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:07:05 crc kubenswrapper[4884]: I1210 01:07:05.067902 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-f9dh7"] Dec 10 01:07:05 crc kubenswrapper[4884]: I1210 01:07:05.078124 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-f9dh7"] Dec 10 01:07:05 crc kubenswrapper[4884]: I1210 01:07:05.298406 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5950ff0d-ec3f-4bb0-8cdf-b2a536c71670" path="/var/lib/kubelet/pods/5950ff0d-ec3f-4bb0-8cdf-b2a536c71670/volumes" Dec 10 01:07:09 crc kubenswrapper[4884]: E1210 01:07:09.291401 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:07:09 crc kubenswrapper[4884]: E1210 01:07:09.291511 4884 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:07:18 crc kubenswrapper[4884]: I1210 01:07:18.098641 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:07:18 crc kubenswrapper[4884]: I1210 01:07:18.101420 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:07:21 crc kubenswrapper[4884]: I1210 01:07:21.032476 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-07b9-account-create-update-24rtb"] Dec 10 01:07:21 crc kubenswrapper[4884]: I1210 01:07:21.042132 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-96hsw"] Dec 10 01:07:21 crc kubenswrapper[4884]: I1210 01:07:21.051871 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-96hsw"] Dec 10 01:07:21 crc kubenswrapper[4884]: I1210 01:07:21.061370 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-07b9-account-create-update-24rtb"] Dec 10 01:07:21 crc kubenswrapper[4884]: I1210 01:07:21.310829 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb77cff2-06ad-4b10-a4c4-89db9f13c65b" path="/var/lib/kubelet/pods/bb77cff2-06ad-4b10-a4c4-89db9f13c65b/volumes" Dec 10 01:07:21 crc kubenswrapper[4884]: I1210 01:07:21.312089 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c50eb7e4-a13a-4700-a41e-5246ff85985b" path="/var/lib/kubelet/pods/c50eb7e4-a13a-4700-a41e-5246ff85985b/volumes" Dec 10 01:07:22 crc kubenswrapper[4884]: E1210 01:07:22.294695 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:07:24 crc kubenswrapper[4884]: E1210 01:07:24.290966 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:07:33 crc kubenswrapper[4884]: E1210 01:07:33.291852 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:07:38 crc kubenswrapper[4884]: E1210 01:07:38.287953 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:07:41 crc kubenswrapper[4884]: I1210 01:07:41.007383 4884 scope.go:117] "RemoveContainer" containerID="281e0c38bca80be7f295a366197a64655016eee0b510252e9df825077f275d05" Dec 10 01:07:41 crc kubenswrapper[4884]: I1210 01:07:41.039873 4884 scope.go:117] "RemoveContainer" containerID="08cba2307bd9555ee7ef8245ffab41fc9f09781cc44049eb5b5fa323a72bc404" Dec 10 01:07:41 crc kubenswrapper[4884]: I1210 01:07:41.059651 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-2m7nt"] Dec 10 01:07:41 crc kubenswrapper[4884]: I1210 01:07:41.074385 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-2m7nt"] Dec 10 01:07:41 crc kubenswrapper[4884]: I1210 01:07:41.114815 4884 scope.go:117] "RemoveContainer" containerID="5cda245efda47526d292cbb1c535c23938a0cab0db243ee95d554069dae871bf" Dec 10 01:07:41 crc kubenswrapper[4884]: I1210 01:07:41.299226 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac5ac12f-a8ae-4710-a058-2eed2b61a0e3" path="/var/lib/kubelet/pods/ac5ac12f-a8ae-4710-a058-2eed2b61a0e3/volumes" Dec 10 01:07:43 crc kubenswrapper[4884]: I1210 01:07:43.045034 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-5nqf7"] Dec 10 01:07:43 crc kubenswrapper[4884]: I1210 01:07:43.061668 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-5nqf7"] Dec 10 01:07:43 crc kubenswrapper[4884]: I1210 01:07:43.305009 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2068f8d7-d11e-4ae1-845b-6a542dfe62fe" path="/var/lib/kubelet/pods/2068f8d7-d11e-4ae1-845b-6a542dfe62fe/volumes" Dec 10 01:07:46 crc kubenswrapper[4884]: E1210 01:07:46.291393 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:07:48 crc kubenswrapper[4884]: I1210 01:07:48.034758 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-mrh8v"] Dec 10 01:07:48 crc kubenswrapper[4884]: I1210 01:07:48.045274 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-mrh8v"] Dec 10 01:07:48 crc kubenswrapper[4884]: I1210 01:07:48.098390 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:07:48 crc kubenswrapper[4884]: I1210 01:07:48.098481 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:07:48 crc kubenswrapper[4884]: I1210 01:07:48.098596 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 01:07:48 crc kubenswrapper[4884]: I1210 01:07:48.099712 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 01:07:48 crc kubenswrapper[4884]: I1210 01:07:48.099812 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" gracePeriod=600 Dec 10 01:07:48 crc kubenswrapper[4884]: E1210 01:07:48.237323 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:07:48 crc kubenswrapper[4884]: I1210 01:07:48.407876 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" exitCode=0 Dec 10 01:07:48 crc kubenswrapper[4884]: I1210 01:07:48.407936 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc"} Dec 10 01:07:48 crc kubenswrapper[4884]: I1210 01:07:48.407985 4884 scope.go:117] "RemoveContainer" containerID="5e4d50ba947a172672504a4b76d8d8687fb7bd5358cadb2fdb3ee9a59857b81c" Dec 10 01:07:48 crc kubenswrapper[4884]: I1210 01:07:48.408948 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:07:48 crc kubenswrapper[4884]: E1210 01:07:48.409390 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:07:49 crc kubenswrapper[4884]: I1210 01:07:49.303943 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="595f30cf-13ba-48bb-949d-393f11660091" path="/var/lib/kubelet/pods/595f30cf-13ba-48bb-949d-393f11660091/volumes" Dec 10 01:07:52 crc kubenswrapper[4884]: E1210 01:07:52.292938 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:07:54 crc kubenswrapper[4884]: I1210 
01:07:54.842384 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bj5kl"] Dec 10 01:07:54 crc kubenswrapper[4884]: I1210 01:07:54.846417 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:07:54 crc kubenswrapper[4884]: I1210 01:07:54.863789 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bj5kl"] Dec 10 01:07:54 crc kubenswrapper[4884]: I1210 01:07:54.914542 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-catalog-content\") pod \"community-operators-bj5kl\" (UID: \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\") " pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:07:54 crc kubenswrapper[4884]: I1210 01:07:54.914692 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-utilities\") pod \"community-operators-bj5kl\" (UID: \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\") " pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:07:54 crc kubenswrapper[4884]: I1210 01:07:54.914750 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjtfd\" (UniqueName: \"kubernetes.io/projected/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-kube-api-access-bjtfd\") pod \"community-operators-bj5kl\" (UID: \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\") " pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:07:55 crc kubenswrapper[4884]: I1210 01:07:55.015972 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-utilities\") pod \"community-operators-bj5kl\" (UID: \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\") " pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:07:55 crc kubenswrapper[4884]: I1210 01:07:55.016056 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjtfd\" (UniqueName: \"kubernetes.io/projected/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-kube-api-access-bjtfd\") pod \"community-operators-bj5kl\" (UID: \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\") " pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:07:55 crc kubenswrapper[4884]: I1210 01:07:55.016198 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-catalog-content\") pod \"community-operators-bj5kl\" (UID: \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\") " pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:07:55 crc kubenswrapper[4884]: I1210 01:07:55.016529 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-utilities\") pod \"community-operators-bj5kl\" (UID: \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\") " pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:07:55 crc kubenswrapper[4884]: I1210 01:07:55.016629 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-catalog-content\") pod \"community-operators-bj5kl\" (UID: \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\") " pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:07:55 crc kubenswrapper[4884]: I1210 01:07:55.036735 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjtfd\" (UniqueName: \"kubernetes.io/projected/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-kube-api-access-bjtfd\") pod \"community-operators-bj5kl\" (UID: \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\") " pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:07:55 crc kubenswrapper[4884]: I1210 01:07:55.189366 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:07:55 crc kubenswrapper[4884]: I1210 01:07:55.737511 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bj5kl"] Dec 10 01:07:56 crc kubenswrapper[4884]: I1210 01:07:56.506092 4884 generic.go:334] "Generic (PLEG): container finished" podID="56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" containerID="67e68d5af3ec591a6c1efe1ca57416edb6b023a2db6147bd61492d8ef14015bb" exitCode=0 Dec 10 01:07:56 crc kubenswrapper[4884]: I1210 01:07:56.506144 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bj5kl" event={"ID":"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8","Type":"ContainerDied","Data":"67e68d5af3ec591a6c1efe1ca57416edb6b023a2db6147bd61492d8ef14015bb"} Dec 10 01:07:56 crc kubenswrapper[4884]: I1210 01:07:56.506173 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bj5kl" event={"ID":"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8","Type":"ContainerStarted","Data":"61a72d74b2366934d41d8914aa395d14b66707ed7a5ef153f274b27053537033"} Dec 10 01:07:57 crc kubenswrapper[4884]: I1210 01:07:57.517866 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bj5kl" event={"ID":"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8","Type":"ContainerStarted","Data":"a155141524b30f3cb0d1470df8c03849aeeade59d048e7b38d30b35ee4b29bc6"} Dec 10 01:07:58 crc kubenswrapper[4884]: E1210 01:07:58.289843 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:07:58 crc kubenswrapper[4884]: I1210 01:07:58.529656 4884 generic.go:334] "Generic (PLEG): container finished" podID="56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" containerID="a155141524b30f3cb0d1470df8c03849aeeade59d048e7b38d30b35ee4b29bc6" exitCode=0 Dec 10 01:07:58 crc kubenswrapper[4884]: I1210 01:07:58.529704 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bj5kl" event={"ID":"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8","Type":"ContainerDied","Data":"a155141524b30f3cb0d1470df8c03849aeeade59d048e7b38d30b35ee4b29bc6"} Dec 10 01:07:59 crc kubenswrapper[4884]: I1210 01:07:59.540224 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bj5kl" event={"ID":"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8","Type":"ContainerStarted","Data":"266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b"} Dec 10 01:07:59 crc 
kubenswrapper[4884]: I1210 01:07:59.571667 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bj5kl" podStartSLOduration=3.143176805 podStartE2EDuration="5.571647671s" podCreationTimestamp="2025-12-10 01:07:54 +0000 UTC" firstStartedPulling="2025-12-10 01:07:56.50814475 +0000 UTC m=+2249.586101867" lastFinishedPulling="2025-12-10 01:07:58.936615606 +0000 UTC m=+2252.014572733" observedRunningTime="2025-12-10 01:07:59.563352856 +0000 UTC m=+2252.641309983" watchObservedRunningTime="2025-12-10 01:07:59.571647671 +0000 UTC m=+2252.649604788" Dec 10 01:08:00 crc kubenswrapper[4884]: I1210 01:08:00.286840 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:08:00 crc kubenswrapper[4884]: E1210 01:08:00.287244 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:08:04 crc kubenswrapper[4884]: E1210 01:08:04.292880 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:08:05 crc kubenswrapper[4884]: I1210 01:08:05.193860 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:08:05 crc kubenswrapper[4884]: I1210 01:08:05.194125 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:08:05 crc kubenswrapper[4884]: I1210 01:08:05.249470 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:08:05 crc kubenswrapper[4884]: I1210 01:08:05.683925 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:08:05 crc kubenswrapper[4884]: I1210 01:08:05.749940 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bj5kl"] Dec 10 01:08:07 crc kubenswrapper[4884]: I1210 01:08:07.635159 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bj5kl" podUID="56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" containerName="registry-server" containerID="cri-o://266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b" gracePeriod=2 Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.190577 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.279899 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-catalog-content\") pod \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\" (UID: \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\") " Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.280006 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-utilities\") pod \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\" (UID: \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\") " Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.280099 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjtfd\" (UniqueName: \"kubernetes.io/projected/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-kube-api-access-bjtfd\") pod \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\" (UID: \"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8\") " Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.280880 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-utilities" (OuterVolumeSpecName: "utilities") pod "56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" (UID: "56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.289505 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-kube-api-access-bjtfd" (OuterVolumeSpecName: "kube-api-access-bjtfd") pod "56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" (UID: "56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8"). InnerVolumeSpecName "kube-api-access-bjtfd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.357595 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" (UID: "56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.383520 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.383583 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.383613 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjtfd\" (UniqueName: \"kubernetes.io/projected/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8-kube-api-access-bjtfd\") on node \"crc\" DevicePath \"\"" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.649896 4884 generic.go:334] "Generic (PLEG): container finished" podID="56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" containerID="266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b" exitCode=0 Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.649921 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bj5kl" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.649971 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bj5kl" event={"ID":"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8","Type":"ContainerDied","Data":"266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b"} Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.650027 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bj5kl" event={"ID":"56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8","Type":"ContainerDied","Data":"61a72d74b2366934d41d8914aa395d14b66707ed7a5ef153f274b27053537033"} Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.650055 4884 scope.go:117] "RemoveContainer" containerID="266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.677065 4884 scope.go:117] "RemoveContainer" containerID="a155141524b30f3cb0d1470df8c03849aeeade59d048e7b38d30b35ee4b29bc6" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.725237 4884 scope.go:117] "RemoveContainer" containerID="67e68d5af3ec591a6c1efe1ca57416edb6b023a2db6147bd61492d8ef14015bb" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.728368 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bj5kl"] Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.740682 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bj5kl"] Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.783794 4884 scope.go:117] "RemoveContainer" containerID="266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b" Dec 10 01:08:08 crc kubenswrapper[4884]: E1210 01:08:08.784247 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b\": container with ID starting with 266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b not found: ID does not exist" containerID="266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.784303 
4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b"} err="failed to get container status \"266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b\": rpc error: code = NotFound desc = could not find container \"266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b\": container with ID starting with 266dcbbbe8882562289eba92a995468fe415950a76ab6453edd7c41406616e0b not found: ID does not exist" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.784339 4884 scope.go:117] "RemoveContainer" containerID="a155141524b30f3cb0d1470df8c03849aeeade59d048e7b38d30b35ee4b29bc6" Dec 10 01:08:08 crc kubenswrapper[4884]: E1210 01:08:08.784725 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a155141524b30f3cb0d1470df8c03849aeeade59d048e7b38d30b35ee4b29bc6\": container with ID starting with a155141524b30f3cb0d1470df8c03849aeeade59d048e7b38d30b35ee4b29bc6 not found: ID does not exist" containerID="a155141524b30f3cb0d1470df8c03849aeeade59d048e7b38d30b35ee4b29bc6" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.784765 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a155141524b30f3cb0d1470df8c03849aeeade59d048e7b38d30b35ee4b29bc6"} err="failed to get container status \"a155141524b30f3cb0d1470df8c03849aeeade59d048e7b38d30b35ee4b29bc6\": rpc error: code = NotFound desc = could not find container \"a155141524b30f3cb0d1470df8c03849aeeade59d048e7b38d30b35ee4b29bc6\": container with ID starting with a155141524b30f3cb0d1470df8c03849aeeade59d048e7b38d30b35ee4b29bc6 not found: ID does not exist" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.784792 4884 scope.go:117] "RemoveContainer" containerID="67e68d5af3ec591a6c1efe1ca57416edb6b023a2db6147bd61492d8ef14015bb" Dec 10 01:08:08 crc kubenswrapper[4884]: E1210 01:08:08.785201 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67e68d5af3ec591a6c1efe1ca57416edb6b023a2db6147bd61492d8ef14015bb\": container with ID starting with 67e68d5af3ec591a6c1efe1ca57416edb6b023a2db6147bd61492d8ef14015bb not found: ID does not exist" containerID="67e68d5af3ec591a6c1efe1ca57416edb6b023a2db6147bd61492d8ef14015bb" Dec 10 01:08:08 crc kubenswrapper[4884]: I1210 01:08:08.785248 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67e68d5af3ec591a6c1efe1ca57416edb6b023a2db6147bd61492d8ef14015bb"} err="failed to get container status \"67e68d5af3ec591a6c1efe1ca57416edb6b023a2db6147bd61492d8ef14015bb\": rpc error: code = NotFound desc = could not find container \"67e68d5af3ec591a6c1efe1ca57416edb6b023a2db6147bd61492d8ef14015bb\": container with ID starting with 67e68d5af3ec591a6c1efe1ca57416edb6b023a2db6147bd61492d8ef14015bb not found: ID does not exist" Dec 10 01:08:09 crc kubenswrapper[4884]: I1210 01:08:09.301644 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" path="/var/lib/kubelet/pods/56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8/volumes" Dec 10 01:08:12 crc kubenswrapper[4884]: E1210 01:08:12.289965 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:08:14 crc kubenswrapper[4884]: I1210 01:08:14.286898 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:08:14 crc kubenswrapper[4884]: E1210 01:08:14.287631 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:08:18 crc kubenswrapper[4884]: E1210 01:08:18.289102 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:08:23 crc kubenswrapper[4884]: E1210 01:08:23.291537 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:08:24 crc kubenswrapper[4884]: I1210 01:08:24.861828 4884 generic.go:334] "Generic (PLEG): container finished" podID="e732d264-88f6-46db-9eff-c7fb0b13e791" containerID="fe37b24fc0fcb63f9e95efce9bf2ae9a51f9937fd71dcb245a9ee6d329d2ef0a" exitCode=0 Dec 10 01:08:24 crc kubenswrapper[4884]: I1210 01:08:24.861925 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" event={"ID":"e732d264-88f6-46db-9eff-c7fb0b13e791","Type":"ContainerDied","Data":"fe37b24fc0fcb63f9e95efce9bf2ae9a51f9937fd71dcb245a9ee6d329d2ef0a"} Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.510778 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.658350 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e732d264-88f6-46db-9eff-c7fb0b13e791-inventory\") pod \"e732d264-88f6-46db-9eff-c7fb0b13e791\" (UID: \"e732d264-88f6-46db-9eff-c7fb0b13e791\") " Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.659087 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e732d264-88f6-46db-9eff-c7fb0b13e791-ssh-key\") pod \"e732d264-88f6-46db-9eff-c7fb0b13e791\" (UID: \"e732d264-88f6-46db-9eff-c7fb0b13e791\") " Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.659290 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jz5d8\" (UniqueName: \"kubernetes.io/projected/e732d264-88f6-46db-9eff-c7fb0b13e791-kube-api-access-jz5d8\") pod \"e732d264-88f6-46db-9eff-c7fb0b13e791\" (UID: \"e732d264-88f6-46db-9eff-c7fb0b13e791\") " Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.667012 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e732d264-88f6-46db-9eff-c7fb0b13e791-kube-api-access-jz5d8" (OuterVolumeSpecName: "kube-api-access-jz5d8") pod "e732d264-88f6-46db-9eff-c7fb0b13e791" (UID: "e732d264-88f6-46db-9eff-c7fb0b13e791"). InnerVolumeSpecName "kube-api-access-jz5d8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.714267 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e732d264-88f6-46db-9eff-c7fb0b13e791-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e732d264-88f6-46db-9eff-c7fb0b13e791" (UID: "e732d264-88f6-46db-9eff-c7fb0b13e791"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.724850 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e732d264-88f6-46db-9eff-c7fb0b13e791-inventory" (OuterVolumeSpecName: "inventory") pod "e732d264-88f6-46db-9eff-c7fb0b13e791" (UID: "e732d264-88f6-46db-9eff-c7fb0b13e791"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.767904 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e732d264-88f6-46db-9eff-c7fb0b13e791-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.767939 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jz5d8\" (UniqueName: \"kubernetes.io/projected/e732d264-88f6-46db-9eff-c7fb0b13e791-kube-api-access-jz5d8\") on node \"crc\" DevicePath \"\"" Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.767983 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e732d264-88f6-46db-9eff-c7fb0b13e791-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.889264 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" event={"ID":"e732d264-88f6-46db-9eff-c7fb0b13e791","Type":"ContainerDied","Data":"fb6e7d072ea69b46dc52a8ca8ab64ab0fb12a71c8cc3c3dfe647033f2ccf16cd"} Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.889375 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx" Dec 10 01:08:26 crc kubenswrapper[4884]: I1210 01:08:26.889468 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb6e7d072ea69b46dc52a8ca8ab64ab0fb12a71c8cc3c3dfe647033f2ccf16cd" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.022285 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm"] Dec 10 01:08:27 crc kubenswrapper[4884]: E1210 01:08:27.022812 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" containerName="extract-utilities" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.022837 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" containerName="extract-utilities" Dec 10 01:08:27 crc kubenswrapper[4884]: E1210 01:08:27.022872 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" containerName="extract-content" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.022881 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" containerName="extract-content" Dec 10 01:08:27 crc kubenswrapper[4884]: E1210 01:08:27.022905 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" containerName="registry-server" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.022913 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" containerName="registry-server" Dec 10 01:08:27 crc kubenswrapper[4884]: E1210 01:08:27.022926 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e732d264-88f6-46db-9eff-c7fb0b13e791" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.022935 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e732d264-88f6-46db-9eff-c7fb0b13e791" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.023195 4884 
memory_manager.go:354] "RemoveStaleState removing state" podUID="e732d264-88f6-46db-9eff-c7fb0b13e791" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.023229 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="56faf8a9-78d6-4e0d-a47b-d1e5baa48ce8" containerName="registry-server" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.024107 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.038232 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.038380 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.038462 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.041738 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm"] Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.043962 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.178694 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2q85\" (UniqueName: \"kubernetes.io/projected/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-kube-api-access-z2q85\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm\" (UID: \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.179235 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm\" (UID: \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.179380 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm\" (UID: \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.281295 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2q85\" (UniqueName: \"kubernetes.io/projected/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-kube-api-access-z2q85\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm\" (UID: \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.281381 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm\" (UID: \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.281606 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm\" (UID: \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.284751 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.284905 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.296893 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:08:27 crc kubenswrapper[4884]: E1210 01:08:27.297764 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.301270 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm\" (UID: \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.301872 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm\" (UID: \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.312270 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2q85\" (UniqueName: \"kubernetes.io/projected/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-kube-api-access-z2q85\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm\" (UID: \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.371028 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:08:27 crc kubenswrapper[4884]: I1210 01:08:27.380143 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:28 crc kubenswrapper[4884]: I1210 01:08:28.021388 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm"] Dec 10 01:08:28 crc kubenswrapper[4884]: I1210 01:08:28.546293 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:08:28 crc kubenswrapper[4884]: I1210 01:08:28.927666 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" event={"ID":"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e","Type":"ContainerStarted","Data":"c48d5504f569e4b74f6bb3150f354d6ed2bb6d1d44299afa6eef582ba07fba11"} Dec 10 01:08:29 crc kubenswrapper[4884]: I1210 01:08:29.072357 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-4st4z"] Dec 10 01:08:29 crc kubenswrapper[4884]: I1210 01:08:29.089337 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-4st4z"] Dec 10 01:08:29 crc kubenswrapper[4884]: I1210 01:08:29.308367 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ff4c708-d45a-4f64-aab0-db7234765ea8" path="/var/lib/kubelet/pods/7ff4c708-d45a-4f64-aab0-db7234765ea8/volumes" Dec 10 01:08:29 crc kubenswrapper[4884]: I1210 01:08:29.955844 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" event={"ID":"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e","Type":"ContainerStarted","Data":"8535c3ffb298093d01ed2ab479bf2900e147e67bf74cfaba8aabced28b9d57c8"} Dec 10 01:08:30 crc kubenswrapper[4884]: I1210 01:08:30.001537 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" podStartSLOduration=3.495154677 podStartE2EDuration="4.001507356s" podCreationTimestamp="2025-12-10 01:08:26 +0000 UTC" firstStartedPulling="2025-12-10 01:08:28.037173914 +0000 UTC m=+2281.115131031" lastFinishedPulling="2025-12-10 01:08:28.543526593 +0000 UTC m=+2281.621483710" observedRunningTime="2025-12-10 01:08:29.985479381 +0000 UTC m=+2283.063436508" watchObservedRunningTime="2025-12-10 01:08:30.001507356 +0000 UTC m=+2283.079464503" Dec 10 01:08:30 crc kubenswrapper[4884]: E1210 01:08:30.289544 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:08:35 crc kubenswrapper[4884]: I1210 01:08:35.017220 4884 generic.go:334] "Generic (PLEG): container finished" podID="2feb23bd-a4c7-4c90-9793-d2abb8c7c02e" containerID="8535c3ffb298093d01ed2ab479bf2900e147e67bf74cfaba8aabced28b9d57c8" exitCode=0 Dec 10 01:08:35 crc kubenswrapper[4884]: I1210 01:08:35.017296 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" event={"ID":"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e","Type":"ContainerDied","Data":"8535c3ffb298093d01ed2ab479bf2900e147e67bf74cfaba8aabced28b9d57c8"} Dec 10 01:08:35 crc kubenswrapper[4884]: E1210 01:08:35.298158 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:08:36 crc kubenswrapper[4884]: I1210 01:08:36.523334 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:36 crc kubenswrapper[4884]: I1210 01:08:36.550838 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-ssh-key\") pod \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\" (UID: \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\") " Dec 10 01:08:36 crc kubenswrapper[4884]: I1210 01:08:36.550908 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-inventory\") pod \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\" (UID: \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\") " Dec 10 01:08:36 crc kubenswrapper[4884]: I1210 01:08:36.550990 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2q85\" (UniqueName: \"kubernetes.io/projected/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-kube-api-access-z2q85\") pod \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\" (UID: \"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e\") " Dec 10 01:08:36 crc kubenswrapper[4884]: I1210 01:08:36.566234 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-kube-api-access-z2q85" (OuterVolumeSpecName: "kube-api-access-z2q85") pod "2feb23bd-a4c7-4c90-9793-d2abb8c7c02e" (UID: "2feb23bd-a4c7-4c90-9793-d2abb8c7c02e"). InnerVolumeSpecName "kube-api-access-z2q85". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:08:36 crc kubenswrapper[4884]: I1210 01:08:36.610229 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2feb23bd-a4c7-4c90-9793-d2abb8c7c02e" (UID: "2feb23bd-a4c7-4c90-9793-d2abb8c7c02e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:08:36 crc kubenswrapper[4884]: I1210 01:08:36.616057 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-inventory" (OuterVolumeSpecName: "inventory") pod "2feb23bd-a4c7-4c90-9793-d2abb8c7c02e" (UID: "2feb23bd-a4c7-4c90-9793-d2abb8c7c02e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:08:36 crc kubenswrapper[4884]: I1210 01:08:36.655729 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2q85\" (UniqueName: \"kubernetes.io/projected/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-kube-api-access-z2q85\") on node \"crc\" DevicePath \"\"" Dec 10 01:08:36 crc kubenswrapper[4884]: I1210 01:08:36.655764 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:08:36 crc kubenswrapper[4884]: I1210 01:08:36.655776 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2feb23bd-a4c7-4c90-9793-d2abb8c7c02e-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.045651 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" event={"ID":"2feb23bd-a4c7-4c90-9793-d2abb8c7c02e","Type":"ContainerDied","Data":"c48d5504f569e4b74f6bb3150f354d6ed2bb6d1d44299afa6eef582ba07fba11"} Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.045696 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c48d5504f569e4b74f6bb3150f354d6ed2bb6d1d44299afa6eef582ba07fba11" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.045714 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.206899 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx"] Dec 10 01:08:37 crc kubenswrapper[4884]: E1210 01:08:37.207522 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2feb23bd-a4c7-4c90-9793-d2abb8c7c02e" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.207580 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2feb23bd-a4c7-4c90-9793-d2abb8c7c02e" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.207904 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2feb23bd-a4c7-4c90-9793-d2abb8c7c02e" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.208753 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.213485 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.213601 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.213998 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.214257 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.228790 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx"] Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.374649 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c56wq\" (UniqueName: \"kubernetes.io/projected/47f1f2ff-140b-4a3c-b810-f25f60bf466f-kube-api-access-c56wq\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6fmlx\" (UID: \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.375204 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/47f1f2ff-140b-4a3c-b810-f25f60bf466f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6fmlx\" (UID: \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.375552 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47f1f2ff-140b-4a3c-b810-f25f60bf466f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6fmlx\" (UID: \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.478622 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/47f1f2ff-140b-4a3c-b810-f25f60bf466f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6fmlx\" (UID: \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.478936 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47f1f2ff-140b-4a3c-b810-f25f60bf466f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6fmlx\" (UID: \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.479187 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c56wq\" (UniqueName: \"kubernetes.io/projected/47f1f2ff-140b-4a3c-b810-f25f60bf466f-kube-api-access-c56wq\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6fmlx\" (UID: 
\"47f1f2ff-140b-4a3c-b810-f25f60bf466f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.485152 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/47f1f2ff-140b-4a3c-b810-f25f60bf466f-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6fmlx\" (UID: \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.486029 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47f1f2ff-140b-4a3c-b810-f25f60bf466f-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6fmlx\" (UID: \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.503646 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c56wq\" (UniqueName: \"kubernetes.io/projected/47f1f2ff-140b-4a3c-b810-f25f60bf466f-kube-api-access-c56wq\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-6fmlx\" (UID: \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.537968 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:08:37 crc kubenswrapper[4884]: I1210 01:08:37.962107 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx"] Dec 10 01:08:38 crc kubenswrapper[4884]: I1210 01:08:38.059577 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" event={"ID":"47f1f2ff-140b-4a3c-b810-f25f60bf466f","Type":"ContainerStarted","Data":"38690fc24b9d38201a7457580439ec96b5249f6e1e7111c2d69f8e7ff08ec9b1"} Dec 10 01:08:39 crc kubenswrapper[4884]: I1210 01:08:39.074045 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" event={"ID":"47f1f2ff-140b-4a3c-b810-f25f60bf466f","Type":"ContainerStarted","Data":"c0e51c07c27c9bf41ed46c47aa012991186185317f35f26d3039f356119605c4"} Dec 10 01:08:39 crc kubenswrapper[4884]: I1210 01:08:39.111378 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" podStartSLOduration=1.6482979009999998 podStartE2EDuration="2.111349727s" podCreationTimestamp="2025-12-10 01:08:37 +0000 UTC" firstStartedPulling="2025-12-10 01:08:37.966325163 +0000 UTC m=+2291.044282320" lastFinishedPulling="2025-12-10 01:08:38.429377019 +0000 UTC m=+2291.507334146" observedRunningTime="2025-12-10 01:08:39.095400784 +0000 UTC m=+2292.173357951" watchObservedRunningTime="2025-12-10 01:08:39.111349727 +0000 UTC m=+2292.189306874" Dec 10 01:08:41 crc kubenswrapper[4884]: I1210 01:08:41.240230 4884 scope.go:117] "RemoveContainer" containerID="1d164ad48e4dd6427611499a874a60b310b0ad64b7401cf5175ec98429159a81" Dec 10 01:08:41 crc kubenswrapper[4884]: I1210 01:08:41.313706 4884 scope.go:117] "RemoveContainer" containerID="a68414962cf847f04f21f34dcd1048893c60fc9263491ca6f386e0eb50c9e06b" Dec 10 01:08:41 crc kubenswrapper[4884]: I1210 01:08:41.386912 4884 scope.go:117] 
"RemoveContainer" containerID="def70ce602df51c6e48669ad5e4d78f29a59d87694800c6bb5dc62d31e3780b9" Dec 10 01:08:41 crc kubenswrapper[4884]: I1210 01:08:41.456911 4884 scope.go:117] "RemoveContainer" containerID="738972214ea3943c4c6a6ede34cb37e87ed21dce7a19d4f2dd9b23f72c2a8981" Dec 10 01:08:42 crc kubenswrapper[4884]: I1210 01:08:42.288896 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:08:42 crc kubenswrapper[4884]: E1210 01:08:42.289741 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:08:45 crc kubenswrapper[4884]: E1210 01:08:45.290795 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:08:49 crc kubenswrapper[4884]: E1210 01:08:49.292306 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:08:57 crc kubenswrapper[4884]: I1210 01:08:57.294094 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:08:57 crc kubenswrapper[4884]: E1210 01:08:57.295026 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:09:00 crc kubenswrapper[4884]: E1210 01:09:00.289670 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:09:02 crc kubenswrapper[4884]: E1210 01:09:02.291138 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:09:11 crc kubenswrapper[4884]: E1210 01:09:11.291052 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" 
pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:09:12 crc kubenswrapper[4884]: I1210 01:09:12.287943 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:09:12 crc kubenswrapper[4884]: E1210 01:09:12.288164 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:09:17 crc kubenswrapper[4884]: E1210 01:09:17.302738 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.379882 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rslnk"] Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.384567 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.394181 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rslnk"] Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.518735 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pf75\" (UniqueName: \"kubernetes.io/projected/5a880e33-5286-43f9-86bd-54751e5212b3-kube-api-access-9pf75\") pod \"certified-operators-rslnk\" (UID: \"5a880e33-5286-43f9-86bd-54751e5212b3\") " pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.519008 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a880e33-5286-43f9-86bd-54751e5212b3-utilities\") pod \"certified-operators-rslnk\" (UID: \"5a880e33-5286-43f9-86bd-54751e5212b3\") " pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.519104 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a880e33-5286-43f9-86bd-54751e5212b3-catalog-content\") pod \"certified-operators-rslnk\" (UID: \"5a880e33-5286-43f9-86bd-54751e5212b3\") " pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.622858 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pf75\" (UniqueName: \"kubernetes.io/projected/5a880e33-5286-43f9-86bd-54751e5212b3-kube-api-access-9pf75\") pod \"certified-operators-rslnk\" (UID: \"5a880e33-5286-43f9-86bd-54751e5212b3\") " pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.622977 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/5a880e33-5286-43f9-86bd-54751e5212b3-utilities\") pod \"certified-operators-rslnk\" (UID: \"5a880e33-5286-43f9-86bd-54751e5212b3\") " pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.623017 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a880e33-5286-43f9-86bd-54751e5212b3-catalog-content\") pod \"certified-operators-rslnk\" (UID: \"5a880e33-5286-43f9-86bd-54751e5212b3\") " pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.623498 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a880e33-5286-43f9-86bd-54751e5212b3-utilities\") pod \"certified-operators-rslnk\" (UID: \"5a880e33-5286-43f9-86bd-54751e5212b3\") " pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.623634 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a880e33-5286-43f9-86bd-54751e5212b3-catalog-content\") pod \"certified-operators-rslnk\" (UID: \"5a880e33-5286-43f9-86bd-54751e5212b3\") " pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.657572 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pf75\" (UniqueName: \"kubernetes.io/projected/5a880e33-5286-43f9-86bd-54751e5212b3-kube-api-access-9pf75\") pod \"certified-operators-rslnk\" (UID: \"5a880e33-5286-43f9-86bd-54751e5212b3\") " pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.658358 4884 generic.go:334] "Generic (PLEG): container finished" podID="47f1f2ff-140b-4a3c-b810-f25f60bf466f" containerID="c0e51c07c27c9bf41ed46c47aa012991186185317f35f26d3039f356119605c4" exitCode=0 Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.658391 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" event={"ID":"47f1f2ff-140b-4a3c-b810-f25f60bf466f","Type":"ContainerDied","Data":"c0e51c07c27c9bf41ed46c47aa012991186185317f35f26d3039f356119605c4"} Dec 10 01:09:21 crc kubenswrapper[4884]: I1210 01:09:21.708447 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:22 crc kubenswrapper[4884]: I1210 01:09:22.284591 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rslnk"] Dec 10 01:09:22 crc kubenswrapper[4884]: E1210 01:09:22.288189 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:09:22 crc kubenswrapper[4884]: W1210 01:09:22.289191 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5a880e33_5286_43f9_86bd_54751e5212b3.slice/crio-09244bcc5a82de617332a5be01b067188a1e8c0510c1830626589bfe824193f7 WatchSource:0}: Error finding container 09244bcc5a82de617332a5be01b067188a1e8c0510c1830626589bfe824193f7: Status 404 returned error can't find the container with id 09244bcc5a82de617332a5be01b067188a1e8c0510c1830626589bfe824193f7 Dec 10 01:09:22 crc kubenswrapper[4884]: I1210 01:09:22.675236 4884 generic.go:334] "Generic (PLEG): container finished" podID="5a880e33-5286-43f9-86bd-54751e5212b3" containerID="5960ab004f67c60761e0c560c6a0c7b0535641cda665c208694e4fb7f7cc9442" exitCode=0 Dec 10 01:09:22 crc kubenswrapper[4884]: I1210 01:09:22.675365 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rslnk" event={"ID":"5a880e33-5286-43f9-86bd-54751e5212b3","Type":"ContainerDied","Data":"5960ab004f67c60761e0c560c6a0c7b0535641cda665c208694e4fb7f7cc9442"} Dec 10 01:09:22 crc kubenswrapper[4884]: I1210 01:09:22.675788 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rslnk" event={"ID":"5a880e33-5286-43f9-86bd-54751e5212b3","Type":"ContainerStarted","Data":"09244bcc5a82de617332a5be01b067188a1e8c0510c1830626589bfe824193f7"} Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.247586 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.295942 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:09:23 crc kubenswrapper[4884]: E1210 01:09:23.296777 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.372866 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c56wq\" (UniqueName: \"kubernetes.io/projected/47f1f2ff-140b-4a3c-b810-f25f60bf466f-kube-api-access-c56wq\") pod \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\" (UID: \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\") " Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.372921 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47f1f2ff-140b-4a3c-b810-f25f60bf466f-ssh-key\") pod \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\" (UID: \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\") " Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.372990 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/47f1f2ff-140b-4a3c-b810-f25f60bf466f-inventory\") pod \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\" (UID: \"47f1f2ff-140b-4a3c-b810-f25f60bf466f\") " Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.378936 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47f1f2ff-140b-4a3c-b810-f25f60bf466f-kube-api-access-c56wq" (OuterVolumeSpecName: "kube-api-access-c56wq") pod "47f1f2ff-140b-4a3c-b810-f25f60bf466f" (UID: "47f1f2ff-140b-4a3c-b810-f25f60bf466f"). InnerVolumeSpecName "kube-api-access-c56wq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.417822 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47f1f2ff-140b-4a3c-b810-f25f60bf466f-inventory" (OuterVolumeSpecName: "inventory") pod "47f1f2ff-140b-4a3c-b810-f25f60bf466f" (UID: "47f1f2ff-140b-4a3c-b810-f25f60bf466f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.424418 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47f1f2ff-140b-4a3c-b810-f25f60bf466f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "47f1f2ff-140b-4a3c-b810-f25f60bf466f" (UID: "47f1f2ff-140b-4a3c-b810-f25f60bf466f"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.475326 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/47f1f2ff-140b-4a3c-b810-f25f60bf466f-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.475520 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c56wq\" (UniqueName: \"kubernetes.io/projected/47f1f2ff-140b-4a3c-b810-f25f60bf466f-kube-api-access-c56wq\") on node \"crc\" DevicePath \"\"" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.475583 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/47f1f2ff-140b-4a3c-b810-f25f60bf466f-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.693366 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" event={"ID":"47f1f2ff-140b-4a3c-b810-f25f60bf466f","Type":"ContainerDied","Data":"38690fc24b9d38201a7457580439ec96b5249f6e1e7111c2d69f8e7ff08ec9b1"} Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.693429 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="38690fc24b9d38201a7457580439ec96b5249f6e1e7111c2d69f8e7ff08ec9b1" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.693466 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-6fmlx" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.797220 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm"] Dec 10 01:09:23 crc kubenswrapper[4884]: E1210 01:09:23.797759 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47f1f2ff-140b-4a3c-b810-f25f60bf466f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.797785 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="47f1f2ff-140b-4a3c-b810-f25f60bf466f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.798095 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="47f1f2ff-140b-4a3c-b810-f25f60bf466f" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.799019 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.801401 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.801858 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.801996 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.802206 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.818692 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm"] Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.990418 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqrxp\" (UniqueName: \"kubernetes.io/projected/27cdea1b-8993-44d6-80f9-a4f46413a746-kube-api-access-sqrxp\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm\" (UID: \"27cdea1b-8993-44d6-80f9-a4f46413a746\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.990784 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/27cdea1b-8993-44d6-80f9-a4f46413a746-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm\" (UID: \"27cdea1b-8993-44d6-80f9-a4f46413a746\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:23 crc kubenswrapper[4884]: I1210 01:09:23.990849 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/27cdea1b-8993-44d6-80f9-a4f46413a746-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm\" (UID: \"27cdea1b-8993-44d6-80f9-a4f46413a746\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:24 crc kubenswrapper[4884]: I1210 01:09:24.093597 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqrxp\" (UniqueName: \"kubernetes.io/projected/27cdea1b-8993-44d6-80f9-a4f46413a746-kube-api-access-sqrxp\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm\" (UID: \"27cdea1b-8993-44d6-80f9-a4f46413a746\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:24 crc kubenswrapper[4884]: I1210 01:09:24.093965 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/27cdea1b-8993-44d6-80f9-a4f46413a746-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm\" (UID: \"27cdea1b-8993-44d6-80f9-a4f46413a746\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:24 crc kubenswrapper[4884]: I1210 01:09:24.094139 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/27cdea1b-8993-44d6-80f9-a4f46413a746-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm\" 
(UID: \"27cdea1b-8993-44d6-80f9-a4f46413a746\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:24 crc kubenswrapper[4884]: I1210 01:09:24.100119 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/27cdea1b-8993-44d6-80f9-a4f46413a746-ssh-key\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm\" (UID: \"27cdea1b-8993-44d6-80f9-a4f46413a746\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:24 crc kubenswrapper[4884]: I1210 01:09:24.100222 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/27cdea1b-8993-44d6-80f9-a4f46413a746-inventory\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm\" (UID: \"27cdea1b-8993-44d6-80f9-a4f46413a746\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:24 crc kubenswrapper[4884]: I1210 01:09:24.111982 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqrxp\" (UniqueName: \"kubernetes.io/projected/27cdea1b-8993-44d6-80f9-a4f46413a746-kube-api-access-sqrxp\") pod \"ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm\" (UID: \"27cdea1b-8993-44d6-80f9-a4f46413a746\") " pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:24 crc kubenswrapper[4884]: I1210 01:09:24.123856 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:24 crc kubenswrapper[4884]: I1210 01:09:24.711191 4884 generic.go:334] "Generic (PLEG): container finished" podID="5a880e33-5286-43f9-86bd-54751e5212b3" containerID="97f7179f27ad9d13a7c8efa480c2eda10f7cf607f9dc309c62072b5cf3f9f8f8" exitCode=0 Dec 10 01:09:24 crc kubenswrapper[4884]: I1210 01:09:24.711282 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rslnk" event={"ID":"5a880e33-5286-43f9-86bd-54751e5212b3","Type":"ContainerDied","Data":"97f7179f27ad9d13a7c8efa480c2eda10f7cf607f9dc309c62072b5cf3f9f8f8"} Dec 10 01:09:24 crc kubenswrapper[4884]: I1210 01:09:24.714398 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm"] Dec 10 01:09:24 crc kubenswrapper[4884]: W1210 01:09:24.724442 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27cdea1b_8993_44d6_80f9_a4f46413a746.slice/crio-15aa7f5f074548e28a0e18f28e25735a03c519b7f95129d82fd6a211b97fda0d WatchSource:0}: Error finding container 15aa7f5f074548e28a0e18f28e25735a03c519b7f95129d82fd6a211b97fda0d: Status 404 returned error can't find the container with id 15aa7f5f074548e28a0e18f28e25735a03c519b7f95129d82fd6a211b97fda0d Dec 10 01:09:25 crc kubenswrapper[4884]: I1210 01:09:25.726882 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" event={"ID":"27cdea1b-8993-44d6-80f9-a4f46413a746","Type":"ContainerStarted","Data":"757b3080bea25c3f0d94535e3731343e7f95f215a54dcd8a5054a0c18aa0e5ff"} Dec 10 01:09:25 crc kubenswrapper[4884]: I1210 01:09:25.727271 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" 
event={"ID":"27cdea1b-8993-44d6-80f9-a4f46413a746","Type":"ContainerStarted","Data":"15aa7f5f074548e28a0e18f28e25735a03c519b7f95129d82fd6a211b97fda0d"} Dec 10 01:09:25 crc kubenswrapper[4884]: I1210 01:09:25.729501 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rslnk" event={"ID":"5a880e33-5286-43f9-86bd-54751e5212b3","Type":"ContainerStarted","Data":"48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c"} Dec 10 01:09:25 crc kubenswrapper[4884]: I1210 01:09:25.745972 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" podStartSLOduration=2.068840109 podStartE2EDuration="2.745951084s" podCreationTimestamp="2025-12-10 01:09:23 +0000 UTC" firstStartedPulling="2025-12-10 01:09:24.735514907 +0000 UTC m=+2337.813472034" lastFinishedPulling="2025-12-10 01:09:25.412625862 +0000 UTC m=+2338.490583009" observedRunningTime="2025-12-10 01:09:25.742492829 +0000 UTC m=+2338.820449946" watchObservedRunningTime="2025-12-10 01:09:25.745951084 +0000 UTC m=+2338.823908211" Dec 10 01:09:25 crc kubenswrapper[4884]: I1210 01:09:25.784204 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rslnk" podStartSLOduration=2.106590682 podStartE2EDuration="4.784187879s" podCreationTimestamp="2025-12-10 01:09:21 +0000 UTC" firstStartedPulling="2025-12-10 01:09:22.678042951 +0000 UTC m=+2335.756000068" lastFinishedPulling="2025-12-10 01:09:25.355640128 +0000 UTC m=+2338.433597265" observedRunningTime="2025-12-10 01:09:25.758859993 +0000 UTC m=+2338.836817160" watchObservedRunningTime="2025-12-10 01:09:25.784187879 +0000 UTC m=+2338.862145056" Dec 10 01:09:29 crc kubenswrapper[4884]: E1210 01:09:29.304380 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:09:30 crc kubenswrapper[4884]: I1210 01:09:30.786248 4884 generic.go:334] "Generic (PLEG): container finished" podID="27cdea1b-8993-44d6-80f9-a4f46413a746" containerID="757b3080bea25c3f0d94535e3731343e7f95f215a54dcd8a5054a0c18aa0e5ff" exitCode=0 Dec 10 01:09:30 crc kubenswrapper[4884]: I1210 01:09:30.786386 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" event={"ID":"27cdea1b-8993-44d6-80f9-a4f46413a746","Type":"ContainerDied","Data":"757b3080bea25c3f0d94535e3731343e7f95f215a54dcd8a5054a0c18aa0e5ff"} Dec 10 01:09:31 crc kubenswrapper[4884]: I1210 01:09:31.709677 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:31 crc kubenswrapper[4884]: I1210 01:09:31.709762 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:31 crc kubenswrapper[4884]: I1210 01:09:31.798719 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:31 crc kubenswrapper[4884]: I1210 01:09:31.896596 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:32 crc 
kubenswrapper[4884]: I1210 01:09:32.054327 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rslnk"] Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.341861 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.493852 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/27cdea1b-8993-44d6-80f9-a4f46413a746-ssh-key\") pod \"27cdea1b-8993-44d6-80f9-a4f46413a746\" (UID: \"27cdea1b-8993-44d6-80f9-a4f46413a746\") " Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.493990 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/27cdea1b-8993-44d6-80f9-a4f46413a746-inventory\") pod \"27cdea1b-8993-44d6-80f9-a4f46413a746\" (UID: \"27cdea1b-8993-44d6-80f9-a4f46413a746\") " Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.494037 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqrxp\" (UniqueName: \"kubernetes.io/projected/27cdea1b-8993-44d6-80f9-a4f46413a746-kube-api-access-sqrxp\") pod \"27cdea1b-8993-44d6-80f9-a4f46413a746\" (UID: \"27cdea1b-8993-44d6-80f9-a4f46413a746\") " Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.500729 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27cdea1b-8993-44d6-80f9-a4f46413a746-kube-api-access-sqrxp" (OuterVolumeSpecName: "kube-api-access-sqrxp") pod "27cdea1b-8993-44d6-80f9-a4f46413a746" (UID: "27cdea1b-8993-44d6-80f9-a4f46413a746"). InnerVolumeSpecName "kube-api-access-sqrxp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.534559 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27cdea1b-8993-44d6-80f9-a4f46413a746-inventory" (OuterVolumeSpecName: "inventory") pod "27cdea1b-8993-44d6-80f9-a4f46413a746" (UID: "27cdea1b-8993-44d6-80f9-a4f46413a746"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.549108 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27cdea1b-8993-44d6-80f9-a4f46413a746-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "27cdea1b-8993-44d6-80f9-a4f46413a746" (UID: "27cdea1b-8993-44d6-80f9-a4f46413a746"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.597804 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/27cdea1b-8993-44d6-80f9-a4f46413a746-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.597862 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/27cdea1b-8993-44d6-80f9-a4f46413a746-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.597887 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqrxp\" (UniqueName: \"kubernetes.io/projected/27cdea1b-8993-44d6-80f9-a4f46413a746-kube-api-access-sqrxp\") on node \"crc\" DevicePath \"\"" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.828885 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.828872 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm" event={"ID":"27cdea1b-8993-44d6-80f9-a4f46413a746","Type":"ContainerDied","Data":"15aa7f5f074548e28a0e18f28e25735a03c519b7f95129d82fd6a211b97fda0d"} Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.828974 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15aa7f5f074548e28a0e18f28e25735a03c519b7f95129d82fd6a211b97fda0d" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.914951 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql"] Dec 10 01:09:32 crc kubenswrapper[4884]: E1210 01:09:32.915736 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27cdea1b-8993-44d6-80f9-a4f46413a746" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.915772 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="27cdea1b-8993-44d6-80f9-a4f46413a746" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.916208 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="27cdea1b-8993-44d6-80f9-a4f46413a746" containerName="ceph-hci-pre-edpm-deployment-openstack-edpm-ipam" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.917485 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.920702 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.921185 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.921961 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.922028 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:09:32 crc kubenswrapper[4884]: I1210 01:09:32.929457 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql"] Dec 10 01:09:33 crc kubenswrapper[4884]: I1210 01:09:33.109205 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebb9744c-67b7-4ffb-8b42-88feca31263f-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql\" (UID: \"ebb9744c-67b7-4ffb-8b42-88feca31263f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:09:33 crc kubenswrapper[4884]: I1210 01:09:33.109275 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebb9744c-67b7-4ffb-8b42-88feca31263f-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql\" (UID: \"ebb9744c-67b7-4ffb-8b42-88feca31263f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:09:33 crc kubenswrapper[4884]: I1210 01:09:33.109668 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2jbp\" (UniqueName: \"kubernetes.io/projected/ebb9744c-67b7-4ffb-8b42-88feca31263f-kube-api-access-c2jbp\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql\" (UID: \"ebb9744c-67b7-4ffb-8b42-88feca31263f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:09:33 crc kubenswrapper[4884]: I1210 01:09:33.228404 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebb9744c-67b7-4ffb-8b42-88feca31263f-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql\" (UID: \"ebb9744c-67b7-4ffb-8b42-88feca31263f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:09:33 crc kubenswrapper[4884]: I1210 01:09:33.228756 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebb9744c-67b7-4ffb-8b42-88feca31263f-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql\" (UID: \"ebb9744c-67b7-4ffb-8b42-88feca31263f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:09:33 crc kubenswrapper[4884]: I1210 01:09:33.228997 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2jbp\" (UniqueName: \"kubernetes.io/projected/ebb9744c-67b7-4ffb-8b42-88feca31263f-kube-api-access-c2jbp\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql\" 
(UID: \"ebb9744c-67b7-4ffb-8b42-88feca31263f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:09:33 crc kubenswrapper[4884]: I1210 01:09:33.236384 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebb9744c-67b7-4ffb-8b42-88feca31263f-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql\" (UID: \"ebb9744c-67b7-4ffb-8b42-88feca31263f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:09:33 crc kubenswrapper[4884]: I1210 01:09:33.237028 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebb9744c-67b7-4ffb-8b42-88feca31263f-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql\" (UID: \"ebb9744c-67b7-4ffb-8b42-88feca31263f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:09:33 crc kubenswrapper[4884]: I1210 01:09:33.260017 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2jbp\" (UniqueName: \"kubernetes.io/projected/ebb9744c-67b7-4ffb-8b42-88feca31263f-kube-api-access-c2jbp\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql\" (UID: \"ebb9744c-67b7-4ffb-8b42-88feca31263f\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:09:33 crc kubenswrapper[4884]: E1210 01:09:33.289043 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:09:33 crc kubenswrapper[4884]: I1210 01:09:33.546800 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:09:33 crc kubenswrapper[4884]: I1210 01:09:33.838217 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rslnk" podUID="5a880e33-5286-43f9-86bd-54751e5212b3" containerName="registry-server" containerID="cri-o://48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c" gracePeriod=2 Dec 10 01:09:34 crc kubenswrapper[4884]: W1210 01:09:34.086256 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podebb9744c_67b7_4ffb_8b42_88feca31263f.slice/crio-bbf355b7279059c082fa4d8b22cf512de40884c636b11489bf6b7bb0d9017b36 WatchSource:0}: Error finding container bbf355b7279059c082fa4d8b22cf512de40884c636b11489bf6b7bb0d9017b36: Status 404 returned error can't find the container with id bbf355b7279059c082fa4d8b22cf512de40884c636b11489bf6b7bb0d9017b36 Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.099469 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql"] Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.380956 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.460337 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9pf75\" (UniqueName: \"kubernetes.io/projected/5a880e33-5286-43f9-86bd-54751e5212b3-kube-api-access-9pf75\") pod \"5a880e33-5286-43f9-86bd-54751e5212b3\" (UID: \"5a880e33-5286-43f9-86bd-54751e5212b3\") " Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.460601 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a880e33-5286-43f9-86bd-54751e5212b3-catalog-content\") pod \"5a880e33-5286-43f9-86bd-54751e5212b3\" (UID: \"5a880e33-5286-43f9-86bd-54751e5212b3\") " Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.460689 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a880e33-5286-43f9-86bd-54751e5212b3-utilities\") pod \"5a880e33-5286-43f9-86bd-54751e5212b3\" (UID: \"5a880e33-5286-43f9-86bd-54751e5212b3\") " Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.462725 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a880e33-5286-43f9-86bd-54751e5212b3-utilities" (OuterVolumeSpecName: "utilities") pod "5a880e33-5286-43f9-86bd-54751e5212b3" (UID: "5a880e33-5286-43f9-86bd-54751e5212b3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.482789 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a880e33-5286-43f9-86bd-54751e5212b3-kube-api-access-9pf75" (OuterVolumeSpecName: "kube-api-access-9pf75") pod "5a880e33-5286-43f9-86bd-54751e5212b3" (UID: "5a880e33-5286-43f9-86bd-54751e5212b3"). InnerVolumeSpecName "kube-api-access-9pf75". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.564613 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5a880e33-5286-43f9-86bd-54751e5212b3-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.564650 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9pf75\" (UniqueName: \"kubernetes.io/projected/5a880e33-5286-43f9-86bd-54751e5212b3-kube-api-access-9pf75\") on node \"crc\" DevicePath \"\"" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.856867 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" event={"ID":"ebb9744c-67b7-4ffb-8b42-88feca31263f","Type":"ContainerStarted","Data":"bbf355b7279059c082fa4d8b22cf512de40884c636b11489bf6b7bb0d9017b36"} Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.860089 4884 generic.go:334] "Generic (PLEG): container finished" podID="5a880e33-5286-43f9-86bd-54751e5212b3" containerID="48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c" exitCode=0 Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.860123 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rslnk" event={"ID":"5a880e33-5286-43f9-86bd-54751e5212b3","Type":"ContainerDied","Data":"48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c"} Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.860143 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rslnk" event={"ID":"5a880e33-5286-43f9-86bd-54751e5212b3","Type":"ContainerDied","Data":"09244bcc5a82de617332a5be01b067188a1e8c0510c1830626589bfe824193f7"} Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.860160 4884 scope.go:117] "RemoveContainer" containerID="48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.860218 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rslnk" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.874672 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5a880e33-5286-43f9-86bd-54751e5212b3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5a880e33-5286-43f9-86bd-54751e5212b3" (UID: "5a880e33-5286-43f9-86bd-54751e5212b3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.892351 4884 scope.go:117] "RemoveContainer" containerID="97f7179f27ad9d13a7c8efa480c2eda10f7cf607f9dc309c62072b5cf3f9f8f8" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.920993 4884 scope.go:117] "RemoveContainer" containerID="5960ab004f67c60761e0c560c6a0c7b0535641cda665c208694e4fb7f7cc9442" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.961624 4884 scope.go:117] "RemoveContainer" containerID="48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c" Dec 10 01:09:34 crc kubenswrapper[4884]: E1210 01:09:34.962332 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c\": container with ID starting with 48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c not found: ID does not exist" containerID="48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.962404 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c"} err="failed to get container status \"48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c\": rpc error: code = NotFound desc = could not find container \"48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c\": container with ID starting with 48d4ddcdf43bff261c2f47a63ce34e9ed5d57ac90b9fb1ac5b2775cdfac6040c not found: ID does not exist" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.963708 4884 scope.go:117] "RemoveContainer" containerID="97f7179f27ad9d13a7c8efa480c2eda10f7cf607f9dc309c62072b5cf3f9f8f8" Dec 10 01:09:34 crc kubenswrapper[4884]: E1210 01:09:34.965055 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97f7179f27ad9d13a7c8efa480c2eda10f7cf607f9dc309c62072b5cf3f9f8f8\": container with ID starting with 97f7179f27ad9d13a7c8efa480c2eda10f7cf607f9dc309c62072b5cf3f9f8f8 not found: ID does not exist" containerID="97f7179f27ad9d13a7c8efa480c2eda10f7cf607f9dc309c62072b5cf3f9f8f8" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.965094 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97f7179f27ad9d13a7c8efa480c2eda10f7cf607f9dc309c62072b5cf3f9f8f8"} err="failed to get container status \"97f7179f27ad9d13a7c8efa480c2eda10f7cf607f9dc309c62072b5cf3f9f8f8\": rpc error: code = NotFound desc = could not find container \"97f7179f27ad9d13a7c8efa480c2eda10f7cf607f9dc309c62072b5cf3f9f8f8\": container with ID starting with 97f7179f27ad9d13a7c8efa480c2eda10f7cf607f9dc309c62072b5cf3f9f8f8 not found: ID does not exist" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.965124 4884 scope.go:117] "RemoveContainer" containerID="5960ab004f67c60761e0c560c6a0c7b0535641cda665c208694e4fb7f7cc9442" Dec 10 01:09:34 crc kubenswrapper[4884]: E1210 01:09:34.965781 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5960ab004f67c60761e0c560c6a0c7b0535641cda665c208694e4fb7f7cc9442\": container with ID starting with 5960ab004f67c60761e0c560c6a0c7b0535641cda665c208694e4fb7f7cc9442 not found: ID does not exist" containerID="5960ab004f67c60761e0c560c6a0c7b0535641cda665c208694e4fb7f7cc9442" Dec 10 01:09:34 crc 
kubenswrapper[4884]: I1210 01:09:34.965829 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5960ab004f67c60761e0c560c6a0c7b0535641cda665c208694e4fb7f7cc9442"} err="failed to get container status \"5960ab004f67c60761e0c560c6a0c7b0535641cda665c208694e4fb7f7cc9442\": rpc error: code = NotFound desc = could not find container \"5960ab004f67c60761e0c560c6a0c7b0535641cda665c208694e4fb7f7cc9442\": container with ID starting with 5960ab004f67c60761e0c560c6a0c7b0535641cda665c208694e4fb7f7cc9442 not found: ID does not exist" Dec 10 01:09:34 crc kubenswrapper[4884]: I1210 01:09:34.978313 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5a880e33-5286-43f9-86bd-54751e5212b3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:09:35 crc kubenswrapper[4884]: I1210 01:09:35.203799 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rslnk"] Dec 10 01:09:35 crc kubenswrapper[4884]: I1210 01:09:35.212128 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rslnk"] Dec 10 01:09:35 crc kubenswrapper[4884]: I1210 01:09:35.302991 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a880e33-5286-43f9-86bd-54751e5212b3" path="/var/lib/kubelet/pods/5a880e33-5286-43f9-86bd-54751e5212b3/volumes" Dec 10 01:09:35 crc kubenswrapper[4884]: I1210 01:09:35.881828 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" event={"ID":"ebb9744c-67b7-4ffb-8b42-88feca31263f","Type":"ContainerStarted","Data":"b072da5bcf85e376c439dfa2746db77ef4f15f3f116406c73daac906f7cbaeaf"} Dec 10 01:09:35 crc kubenswrapper[4884]: I1210 01:09:35.916269 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" podStartSLOduration=3.321574364 podStartE2EDuration="3.916243126s" podCreationTimestamp="2025-12-10 01:09:32 +0000 UTC" firstStartedPulling="2025-12-10 01:09:34.097386436 +0000 UTC m=+2347.175343583" lastFinishedPulling="2025-12-10 01:09:34.692055218 +0000 UTC m=+2347.770012345" observedRunningTime="2025-12-10 01:09:35.904090007 +0000 UTC m=+2348.982047154" watchObservedRunningTime="2025-12-10 01:09:35.916243126 +0000 UTC m=+2348.994200283" Dec 10 01:09:36 crc kubenswrapper[4884]: I1210 01:09:36.288511 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:09:36 crc kubenswrapper[4884]: E1210 01:09:36.288861 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:09:43 crc kubenswrapper[4884]: E1210 01:09:43.289712 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:09:46 crc kubenswrapper[4884]: 
E1210 01:09:46.290545 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:09:47 crc kubenswrapper[4884]: I1210 01:09:47.293288 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:09:47 crc kubenswrapper[4884]: E1210 01:09:47.293768 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:09:58 crc kubenswrapper[4884]: E1210 01:09:58.289645 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:09:58 crc kubenswrapper[4884]: I1210 01:09:58.289826 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 01:09:58 crc kubenswrapper[4884]: E1210 01:09:58.422465 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:09:58 crc kubenswrapper[4884]: E1210 01:09:58.422559 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:09:58 crc kubenswrapper[4884]: E1210 01:09:58.422782 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:09:58 crc kubenswrapper[4884]: E1210 01:09:58.424012 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:10:01 crc kubenswrapper[4884]: I1210 01:10:01.288116 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:10:01 crc kubenswrapper[4884]: E1210 01:10:01.289359 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:10:13 crc kubenswrapper[4884]: I1210 01:10:13.287077 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:10:13 crc kubenswrapper[4884]: E1210 01:10:13.287844 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:10:13 crc kubenswrapper[4884]: E1210 01:10:13.443631 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:10:13 crc kubenswrapper[4884]: E1210 01:10:13.444103 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:10:13 crc kubenswrapper[4884]: E1210 01:10:13.444320 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:10:13 crc kubenswrapper[4884]: E1210 01:10:13.445856 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:10:14 crc kubenswrapper[4884]: E1210 01:10:14.290790 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:10:24 crc kubenswrapper[4884]: I1210 01:10:24.287707 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:10:24 crc kubenswrapper[4884]: E1210 01:10:24.288517 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:10:25 crc kubenswrapper[4884]: E1210 01:10:25.289574 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:10:27 crc kubenswrapper[4884]: E1210 01:10:27.303504 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:10:34 crc kubenswrapper[4884]: I1210 01:10:34.619104 4884 generic.go:334] "Generic (PLEG): container finished" podID="ebb9744c-67b7-4ffb-8b42-88feca31263f" containerID="b072da5bcf85e376c439dfa2746db77ef4f15f3f116406c73daac906f7cbaeaf" exitCode=0 Dec 10 01:10:34 crc kubenswrapper[4884]: I1210 01:10:34.619188 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" event={"ID":"ebb9744c-67b7-4ffb-8b42-88feca31263f","Type":"ContainerDied","Data":"b072da5bcf85e376c439dfa2746db77ef4f15f3f116406c73daac906f7cbaeaf"} Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.179753 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.298787 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebb9744c-67b7-4ffb-8b42-88feca31263f-ssh-key\") pod \"ebb9744c-67b7-4ffb-8b42-88feca31263f\" (UID: \"ebb9744c-67b7-4ffb-8b42-88feca31263f\") " Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.299135 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebb9744c-67b7-4ffb-8b42-88feca31263f-inventory\") pod \"ebb9744c-67b7-4ffb-8b42-88feca31263f\" (UID: \"ebb9744c-67b7-4ffb-8b42-88feca31263f\") " Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.299212 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2jbp\" (UniqueName: \"kubernetes.io/projected/ebb9744c-67b7-4ffb-8b42-88feca31263f-kube-api-access-c2jbp\") pod \"ebb9744c-67b7-4ffb-8b42-88feca31263f\" (UID: \"ebb9744c-67b7-4ffb-8b42-88feca31263f\") " Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.304645 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebb9744c-67b7-4ffb-8b42-88feca31263f-kube-api-access-c2jbp" (OuterVolumeSpecName: "kube-api-access-c2jbp") pod "ebb9744c-67b7-4ffb-8b42-88feca31263f" (UID: "ebb9744c-67b7-4ffb-8b42-88feca31263f"). InnerVolumeSpecName "kube-api-access-c2jbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.325726 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebb9744c-67b7-4ffb-8b42-88feca31263f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ebb9744c-67b7-4ffb-8b42-88feca31263f" (UID: "ebb9744c-67b7-4ffb-8b42-88feca31263f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.326534 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebb9744c-67b7-4ffb-8b42-88feca31263f-inventory" (OuterVolumeSpecName: "inventory") pod "ebb9744c-67b7-4ffb-8b42-88feca31263f" (UID: "ebb9744c-67b7-4ffb-8b42-88feca31263f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.403115 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2jbp\" (UniqueName: \"kubernetes.io/projected/ebb9744c-67b7-4ffb-8b42-88feca31263f-kube-api-access-c2jbp\") on node \"crc\" DevicePath \"\"" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.403194 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ebb9744c-67b7-4ffb-8b42-88feca31263f-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.403216 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ebb9744c-67b7-4ffb-8b42-88feca31263f-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.649904 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" event={"ID":"ebb9744c-67b7-4ffb-8b42-88feca31263f","Type":"ContainerDied","Data":"bbf355b7279059c082fa4d8b22cf512de40884c636b11489bf6b7bb0d9017b36"} Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.649942 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.649972 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bbf355b7279059c082fa4d8b22cf512de40884c636b11489bf6b7bb0d9017b36" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.778794 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-glgsr"] Dec 10 01:10:36 crc kubenswrapper[4884]: E1210 01:10:36.779407 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a880e33-5286-43f9-86bd-54751e5212b3" containerName="extract-utilities" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.779461 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a880e33-5286-43f9-86bd-54751e5212b3" containerName="extract-utilities" Dec 10 01:10:36 crc kubenswrapper[4884]: E1210 01:10:36.779484 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebb9744c-67b7-4ffb-8b42-88feca31263f" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.779499 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebb9744c-67b7-4ffb-8b42-88feca31263f" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Dec 10 01:10:36 crc kubenswrapper[4884]: E1210 01:10:36.779534 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a880e33-5286-43f9-86bd-54751e5212b3" containerName="extract-content" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.779547 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a880e33-5286-43f9-86bd-54751e5212b3" containerName="extract-content" Dec 10 01:10:36 crc kubenswrapper[4884]: E1210 01:10:36.779567 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a880e33-5286-43f9-86bd-54751e5212b3" containerName="registry-server" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.779579 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a880e33-5286-43f9-86bd-54751e5212b3" containerName="registry-server" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.779941 4884 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="5a880e33-5286-43f9-86bd-54751e5212b3" containerName="registry-server" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.779980 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebb9744c-67b7-4ffb-8b42-88feca31263f" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.781196 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.791359 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.791690 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.791788 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.791907 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.818304 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-glgsr"] Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.912096 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-glgsr\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.912424 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhf2q\" (UniqueName: \"kubernetes.io/projected/e3b40db4-283d-45ba-85c0-31e649ccdfb2-kube-api-access-zhf2q\") pod \"ssh-known-hosts-edpm-deployment-glgsr\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 01:10:36 crc kubenswrapper[4884]: I1210 01:10:36.912497 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-glgsr\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 01:10:37 crc kubenswrapper[4884]: I1210 01:10:37.014259 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-glgsr\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 01:10:37 crc kubenswrapper[4884]: I1210 01:10:37.014333 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhf2q\" (UniqueName: \"kubernetes.io/projected/e3b40db4-283d-45ba-85c0-31e649ccdfb2-kube-api-access-zhf2q\") pod \"ssh-known-hosts-edpm-deployment-glgsr\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 
01:10:37 crc kubenswrapper[4884]: I1210 01:10:37.014355 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-glgsr\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 01:10:37 crc kubenswrapper[4884]: I1210 01:10:37.019341 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-glgsr\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 01:10:37 crc kubenswrapper[4884]: I1210 01:10:37.019460 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-glgsr\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 01:10:37 crc kubenswrapper[4884]: I1210 01:10:37.043214 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhf2q\" (UniqueName: \"kubernetes.io/projected/e3b40db4-283d-45ba-85c0-31e649ccdfb2-kube-api-access-zhf2q\") pod \"ssh-known-hosts-edpm-deployment-glgsr\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 01:10:37 crc kubenswrapper[4884]: I1210 01:10:37.112663 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 01:10:37 crc kubenswrapper[4884]: W1210 01:10:37.692999 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3b40db4_283d_45ba_85c0_31e649ccdfb2.slice/crio-f327b1f0ba3d575654bd153d7beaa289fab0ccadb2ade470ae3c3a5b43e7111e WatchSource:0}: Error finding container f327b1f0ba3d575654bd153d7beaa289fab0ccadb2ade470ae3c3a5b43e7111e: Status 404 returned error can't find the container with id f327b1f0ba3d575654bd153d7beaa289fab0ccadb2ade470ae3c3a5b43e7111e Dec 10 01:10:37 crc kubenswrapper[4884]: I1210 01:10:37.698649 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-glgsr"] Dec 10 01:10:38 crc kubenswrapper[4884]: I1210 01:10:38.677677 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" event={"ID":"e3b40db4-283d-45ba-85c0-31e649ccdfb2","Type":"ContainerStarted","Data":"f327b1f0ba3d575654bd153d7beaa289fab0ccadb2ade470ae3c3a5b43e7111e"} Dec 10 01:10:39 crc kubenswrapper[4884]: I1210 01:10:39.297032 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:10:39 crc kubenswrapper[4884]: E1210 01:10:39.297634 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 
10 01:10:39 crc kubenswrapper[4884]: E1210 01:10:39.298883 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:10:39 crc kubenswrapper[4884]: E1210 01:10:39.299170 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:10:39 crc kubenswrapper[4884]: I1210 01:10:39.692080 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" event={"ID":"e3b40db4-283d-45ba-85c0-31e649ccdfb2","Type":"ContainerStarted","Data":"c284ef85f1a1cc46c664f152b336782ebc8f523fb38c4a53a1190e4bc7b33336"} Dec 10 01:10:39 crc kubenswrapper[4884]: I1210 01:10:39.721382 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" podStartSLOduration=3.017116864 podStartE2EDuration="3.721311083s" podCreationTimestamp="2025-12-10 01:10:36 +0000 UTC" firstStartedPulling="2025-12-10 01:10:37.69669956 +0000 UTC m=+2410.774656677" lastFinishedPulling="2025-12-10 01:10:38.400893739 +0000 UTC m=+2411.478850896" observedRunningTime="2025-12-10 01:10:39.715324691 +0000 UTC m=+2412.793281798" watchObservedRunningTime="2025-12-10 01:10:39.721311083 +0000 UTC m=+2412.799268200" Dec 10 01:10:46 crc kubenswrapper[4884]: I1210 01:10:46.776278 4884 generic.go:334] "Generic (PLEG): container finished" podID="e3b40db4-283d-45ba-85c0-31e649ccdfb2" containerID="c284ef85f1a1cc46c664f152b336782ebc8f523fb38c4a53a1190e4bc7b33336" exitCode=0 Dec 10 01:10:46 crc kubenswrapper[4884]: I1210 01:10:46.776465 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" event={"ID":"e3b40db4-283d-45ba-85c0-31e649ccdfb2","Type":"ContainerDied","Data":"c284ef85f1a1cc46c664f152b336782ebc8f523fb38c4a53a1190e4bc7b33336"} Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.279561 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.366002 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-inventory-0\") pod \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.366048 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-ssh-key-openstack-edpm-ipam\") pod \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.366352 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhf2q\" (UniqueName: \"kubernetes.io/projected/e3b40db4-283d-45ba-85c0-31e649ccdfb2-kube-api-access-zhf2q\") pod \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.371313 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3b40db4-283d-45ba-85c0-31e649ccdfb2-kube-api-access-zhf2q" (OuterVolumeSpecName: "kube-api-access-zhf2q") pod "e3b40db4-283d-45ba-85c0-31e649ccdfb2" (UID: "e3b40db4-283d-45ba-85c0-31e649ccdfb2"). InnerVolumeSpecName "kube-api-access-zhf2q". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:10:48 crc kubenswrapper[4884]: E1210 01:10:48.391691 4884 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-inventory-0 podName:e3b40db4-283d-45ba-85c0-31e649ccdfb2 nodeName:}" failed. No retries permitted until 2025-12-10 01:10:48.891663067 +0000 UTC m=+2421.969620194 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "inventory-0" (UniqueName: "kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-inventory-0") pod "e3b40db4-283d-45ba-85c0-31e649ccdfb2" (UID: "e3b40db4-283d-45ba-85c0-31e649ccdfb2") : error deleting /var/lib/kubelet/pods/e3b40db4-283d-45ba-85c0-31e649ccdfb2/volume-subpaths: remove /var/lib/kubelet/pods/e3b40db4-283d-45ba-85c0-31e649ccdfb2/volume-subpaths: no such file or directory Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.394131 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e3b40db4-283d-45ba-85c0-31e649ccdfb2" (UID: "e3b40db4-283d-45ba-85c0-31e649ccdfb2"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.469547 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhf2q\" (UniqueName: \"kubernetes.io/projected/e3b40db4-283d-45ba-85c0-31e649ccdfb2-kube-api-access-zhf2q\") on node \"crc\" DevicePath \"\"" Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.469595 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.805836 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" event={"ID":"e3b40db4-283d-45ba-85c0-31e649ccdfb2","Type":"ContainerDied","Data":"f327b1f0ba3d575654bd153d7beaa289fab0ccadb2ade470ae3c3a5b43e7111e"} Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.806330 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f327b1f0ba3d575654bd153d7beaa289fab0ccadb2ade470ae3c3a5b43e7111e" Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.805881 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-glgsr" Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.960276 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln"] Dec 10 01:10:48 crc kubenswrapper[4884]: E1210 01:10:48.960688 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3b40db4-283d-45ba-85c0-31e649ccdfb2" containerName="ssh-known-hosts-edpm-deployment" Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.960706 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3b40db4-283d-45ba-85c0-31e649ccdfb2" containerName="ssh-known-hosts-edpm-deployment" Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.960909 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3b40db4-283d-45ba-85c0-31e649ccdfb2" containerName="ssh-known-hosts-edpm-deployment" Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.961594 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.980858 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln"] Dec 10 01:10:48 crc kubenswrapper[4884]: I1210 01:10:48.990206 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-inventory-0\") pod \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\" (UID: \"e3b40db4-283d-45ba-85c0-31e649ccdfb2\") " Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.003114 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "e3b40db4-283d-45ba-85c0-31e649ccdfb2" (UID: "e3b40db4-283d-45ba-85c0-31e649ccdfb2"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.103760 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjvqm\" (UniqueName: \"kubernetes.io/projected/b3dddf04-7667-4d75-8532-12a3deb1e77c-kube-api-access-sjvqm\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-57rln\" (UID: \"b3dddf04-7667-4d75-8532-12a3deb1e77c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.104157 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3dddf04-7667-4d75-8532-12a3deb1e77c-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-57rln\" (UID: \"b3dddf04-7667-4d75-8532-12a3deb1e77c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.104468 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3dddf04-7667-4d75-8532-12a3deb1e77c-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-57rln\" (UID: \"b3dddf04-7667-4d75-8532-12a3deb1e77c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.105169 4884 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/e3b40db4-283d-45ba-85c0-31e649ccdfb2-inventory-0\") on node \"crc\" DevicePath \"\"" Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.206825 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3dddf04-7667-4d75-8532-12a3deb1e77c-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-57rln\" (UID: \"b3dddf04-7667-4d75-8532-12a3deb1e77c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.206910 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3dddf04-7667-4d75-8532-12a3deb1e77c-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-57rln\" (UID: \"b3dddf04-7667-4d75-8532-12a3deb1e77c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.206971 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjvqm\" (UniqueName: \"kubernetes.io/projected/b3dddf04-7667-4d75-8532-12a3deb1e77c-kube-api-access-sjvqm\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-57rln\" (UID: \"b3dddf04-7667-4d75-8532-12a3deb1e77c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.211987 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3dddf04-7667-4d75-8532-12a3deb1e77c-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-57rln\" (UID: \"b3dddf04-7667-4d75-8532-12a3deb1e77c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.212661 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3dddf04-7667-4d75-8532-12a3deb1e77c-inventory\") pod 
\"run-os-edpm-deployment-openstack-edpm-ipam-57rln\" (UID: \"b3dddf04-7667-4d75-8532-12a3deb1e77c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.228712 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjvqm\" (UniqueName: \"kubernetes.io/projected/b3dddf04-7667-4d75-8532-12a3deb1e77c-kube-api-access-sjvqm\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-57rln\" (UID: \"b3dddf04-7667-4d75-8532-12a3deb1e77c\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.290306 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:10:49 crc kubenswrapper[4884]: I1210 01:10:49.951415 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln"] Dec 10 01:10:50 crc kubenswrapper[4884]: I1210 01:10:50.838603 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" event={"ID":"b3dddf04-7667-4d75-8532-12a3deb1e77c","Type":"ContainerStarted","Data":"8d3b0cffa28924fdac0901b3f5a3099a7f53d4dbff4b0e341b9f04aca03bdfc9"} Dec 10 01:10:50 crc kubenswrapper[4884]: I1210 01:10:50.839175 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" event={"ID":"b3dddf04-7667-4d75-8532-12a3deb1e77c","Type":"ContainerStarted","Data":"4fd3a65f29b7b45dee1d5991470aaf3b2ef093794bb6ed50a8289ec27f7ad501"} Dec 10 01:10:50 crc kubenswrapper[4884]: I1210 01:10:50.863855 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" podStartSLOduration=2.345443182 podStartE2EDuration="2.863836268s" podCreationTimestamp="2025-12-10 01:10:48 +0000 UTC" firstStartedPulling="2025-12-10 01:10:49.952979389 +0000 UTC m=+2423.030936516" lastFinishedPulling="2025-12-10 01:10:50.471372445 +0000 UTC m=+2423.549329602" observedRunningTime="2025-12-10 01:10:50.863372505 +0000 UTC m=+2423.941329632" watchObservedRunningTime="2025-12-10 01:10:50.863836268 +0000 UTC m=+2423.941793395" Dec 10 01:10:52 crc kubenswrapper[4884]: I1210 01:10:52.287224 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:10:52 crc kubenswrapper[4884]: E1210 01:10:52.287846 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:10:52 crc kubenswrapper[4884]: E1210 01:10:52.290203 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:10:53 crc kubenswrapper[4884]: E1210 01:10:53.290372 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:11:01 crc kubenswrapper[4884]: I1210 01:11:01.984360 4884 generic.go:334] "Generic (PLEG): container finished" podID="b3dddf04-7667-4d75-8532-12a3deb1e77c" containerID="8d3b0cffa28924fdac0901b3f5a3099a7f53d4dbff4b0e341b9f04aca03bdfc9" exitCode=0 Dec 10 01:11:01 crc kubenswrapper[4884]: I1210 01:11:01.984515 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" event={"ID":"b3dddf04-7667-4d75-8532-12a3deb1e77c","Type":"ContainerDied","Data":"8d3b0cffa28924fdac0901b3f5a3099a7f53d4dbff4b0e341b9f04aca03bdfc9"} Dec 10 01:11:03 crc kubenswrapper[4884]: I1210 01:11:03.508777 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:11:03 crc kubenswrapper[4884]: I1210 01:11:03.539398 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3dddf04-7667-4d75-8532-12a3deb1e77c-ssh-key\") pod \"b3dddf04-7667-4d75-8532-12a3deb1e77c\" (UID: \"b3dddf04-7667-4d75-8532-12a3deb1e77c\") " Dec 10 01:11:03 crc kubenswrapper[4884]: I1210 01:11:03.539601 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjvqm\" (UniqueName: \"kubernetes.io/projected/b3dddf04-7667-4d75-8532-12a3deb1e77c-kube-api-access-sjvqm\") pod \"b3dddf04-7667-4d75-8532-12a3deb1e77c\" (UID: \"b3dddf04-7667-4d75-8532-12a3deb1e77c\") " Dec 10 01:11:03 crc kubenswrapper[4884]: I1210 01:11:03.539690 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3dddf04-7667-4d75-8532-12a3deb1e77c-inventory\") pod \"b3dddf04-7667-4d75-8532-12a3deb1e77c\" (UID: \"b3dddf04-7667-4d75-8532-12a3deb1e77c\") " Dec 10 01:11:03 crc kubenswrapper[4884]: I1210 01:11:03.544508 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3dddf04-7667-4d75-8532-12a3deb1e77c-kube-api-access-sjvqm" (OuterVolumeSpecName: "kube-api-access-sjvqm") pod "b3dddf04-7667-4d75-8532-12a3deb1e77c" (UID: "b3dddf04-7667-4d75-8532-12a3deb1e77c"). InnerVolumeSpecName "kube-api-access-sjvqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:11:03 crc kubenswrapper[4884]: I1210 01:11:03.574047 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3dddf04-7667-4d75-8532-12a3deb1e77c-inventory" (OuterVolumeSpecName: "inventory") pod "b3dddf04-7667-4d75-8532-12a3deb1e77c" (UID: "b3dddf04-7667-4d75-8532-12a3deb1e77c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:11:03 crc kubenswrapper[4884]: I1210 01:11:03.575704 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3dddf04-7667-4d75-8532-12a3deb1e77c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b3dddf04-7667-4d75-8532-12a3deb1e77c" (UID: "b3dddf04-7667-4d75-8532-12a3deb1e77c"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:11:03 crc kubenswrapper[4884]: I1210 01:11:03.641605 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b3dddf04-7667-4d75-8532-12a3deb1e77c-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:11:03 crc kubenswrapper[4884]: I1210 01:11:03.641635 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b3dddf04-7667-4d75-8532-12a3deb1e77c-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:11:03 crc kubenswrapper[4884]: I1210 01:11:03.641644 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjvqm\" (UniqueName: \"kubernetes.io/projected/b3dddf04-7667-4d75-8532-12a3deb1e77c-kube-api-access-sjvqm\") on node \"crc\" DevicePath \"\"" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.007357 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" event={"ID":"b3dddf04-7667-4d75-8532-12a3deb1e77c","Type":"ContainerDied","Data":"4fd3a65f29b7b45dee1d5991470aaf3b2ef093794bb6ed50a8289ec27f7ad501"} Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.007642 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4fd3a65f29b7b45dee1d5991470aaf3b2ef093794bb6ed50a8289ec27f7ad501" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.007451 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-57rln" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.074779 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl"] Dec 10 01:11:04 crc kubenswrapper[4884]: E1210 01:11:04.075193 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3dddf04-7667-4d75-8532-12a3deb1e77c" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.075212 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3dddf04-7667-4d75-8532-12a3deb1e77c" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.075454 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3dddf04-7667-4d75-8532-12a3deb1e77c" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.076183 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.078186 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.078373 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.079393 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.079679 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.091703 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl"] Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.150554 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p79zs\" (UniqueName: \"kubernetes.io/projected/acc4e8e3-089c-4bb1-935a-f1597f0e973c-kube-api-access-p79zs\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl\" (UID: \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.150635 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/acc4e8e3-089c-4bb1-935a-f1597f0e973c-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl\" (UID: \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.150871 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/acc4e8e3-089c-4bb1-935a-f1597f0e973c-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl\" (UID: \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.253312 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/acc4e8e3-089c-4bb1-935a-f1597f0e973c-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl\" (UID: \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.253515 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p79zs\" (UniqueName: \"kubernetes.io/projected/acc4e8e3-089c-4bb1-935a-f1597f0e973c-kube-api-access-p79zs\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl\" (UID: \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.253615 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/acc4e8e3-089c-4bb1-935a-f1597f0e973c-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl\" (UID: 
\"acc4e8e3-089c-4bb1-935a-f1597f0e973c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.257586 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/acc4e8e3-089c-4bb1-935a-f1597f0e973c-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl\" (UID: \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.258736 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/acc4e8e3-089c-4bb1-935a-f1597f0e973c-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl\" (UID: \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.270138 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p79zs\" (UniqueName: \"kubernetes.io/projected/acc4e8e3-089c-4bb1-935a-f1597f0e973c-kube-api-access-p79zs\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl\" (UID: \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:04 crc kubenswrapper[4884]: I1210 01:11:04.393241 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:05 crc kubenswrapper[4884]: I1210 01:11:05.005237 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl"] Dec 10 01:11:05 crc kubenswrapper[4884]: I1210 01:11:05.016370 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" event={"ID":"acc4e8e3-089c-4bb1-935a-f1597f0e973c","Type":"ContainerStarted","Data":"9ecc1a753635f1f1253a739b254163690c95dcc2492b864e6673f0b492dd96a8"} Dec 10 01:11:05 crc kubenswrapper[4884]: E1210 01:11:05.290033 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:11:06 crc kubenswrapper[4884]: I1210 01:11:06.286981 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:11:06 crc kubenswrapper[4884]: E1210 01:11:06.288010 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:11:06 crc kubenswrapper[4884]: E1210 01:11:06.288549 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" 
podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:11:07 crc kubenswrapper[4884]: I1210 01:11:07.053687 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" event={"ID":"acc4e8e3-089c-4bb1-935a-f1597f0e973c","Type":"ContainerStarted","Data":"b6cd21021caef12f9e0ac15872e85c4d2ebe7fa48f051dbbdb46f1769737aca7"} Dec 10 01:11:07 crc kubenswrapper[4884]: I1210 01:11:07.075708 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" podStartSLOduration=2.187186176 podStartE2EDuration="3.075691259s" podCreationTimestamp="2025-12-10 01:11:04 +0000 UTC" firstStartedPulling="2025-12-10 01:11:05.001035248 +0000 UTC m=+2438.078992365" lastFinishedPulling="2025-12-10 01:11:05.889540291 +0000 UTC m=+2438.967497448" observedRunningTime="2025-12-10 01:11:07.072525213 +0000 UTC m=+2440.150482340" watchObservedRunningTime="2025-12-10 01:11:07.075691259 +0000 UTC m=+2440.153648376" Dec 10 01:11:16 crc kubenswrapper[4884]: I1210 01:11:16.155744 4884 generic.go:334] "Generic (PLEG): container finished" podID="acc4e8e3-089c-4bb1-935a-f1597f0e973c" containerID="b6cd21021caef12f9e0ac15872e85c4d2ebe7fa48f051dbbdb46f1769737aca7" exitCode=0 Dec 10 01:11:16 crc kubenswrapper[4884]: I1210 01:11:16.155863 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" event={"ID":"acc4e8e3-089c-4bb1-935a-f1597f0e973c","Type":"ContainerDied","Data":"b6cd21021caef12f9e0ac15872e85c4d2ebe7fa48f051dbbdb46f1769737aca7"} Dec 10 01:11:16 crc kubenswrapper[4884]: E1210 01:11:16.289873 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:11:17 crc kubenswrapper[4884]: I1210 01:11:17.659529 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:17 crc kubenswrapper[4884]: I1210 01:11:17.769945 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/acc4e8e3-089c-4bb1-935a-f1597f0e973c-inventory\") pod \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\" (UID: \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\") " Dec 10 01:11:17 crc kubenswrapper[4884]: I1210 01:11:17.770024 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/acc4e8e3-089c-4bb1-935a-f1597f0e973c-ssh-key\") pod \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\" (UID: \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\") " Dec 10 01:11:17 crc kubenswrapper[4884]: I1210 01:11:17.770067 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p79zs\" (UniqueName: \"kubernetes.io/projected/acc4e8e3-089c-4bb1-935a-f1597f0e973c-kube-api-access-p79zs\") pod \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\" (UID: \"acc4e8e3-089c-4bb1-935a-f1597f0e973c\") " Dec 10 01:11:17 crc kubenswrapper[4884]: I1210 01:11:17.789754 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acc4e8e3-089c-4bb1-935a-f1597f0e973c-kube-api-access-p79zs" (OuterVolumeSpecName: "kube-api-access-p79zs") pod "acc4e8e3-089c-4bb1-935a-f1597f0e973c" (UID: "acc4e8e3-089c-4bb1-935a-f1597f0e973c"). InnerVolumeSpecName "kube-api-access-p79zs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:11:17 crc kubenswrapper[4884]: I1210 01:11:17.811735 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/acc4e8e3-089c-4bb1-935a-f1597f0e973c-inventory" (OuterVolumeSpecName: "inventory") pod "acc4e8e3-089c-4bb1-935a-f1597f0e973c" (UID: "acc4e8e3-089c-4bb1-935a-f1597f0e973c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:11:17 crc kubenswrapper[4884]: I1210 01:11:17.813034 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/acc4e8e3-089c-4bb1-935a-f1597f0e973c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "acc4e8e3-089c-4bb1-935a-f1597f0e973c" (UID: "acc4e8e3-089c-4bb1-935a-f1597f0e973c"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:11:17 crc kubenswrapper[4884]: I1210 01:11:17.874029 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/acc4e8e3-089c-4bb1-935a-f1597f0e973c-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:11:17 crc kubenswrapper[4884]: I1210 01:11:17.874512 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/acc4e8e3-089c-4bb1-935a-f1597f0e973c-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:11:17 crc kubenswrapper[4884]: I1210 01:11:17.874723 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p79zs\" (UniqueName: \"kubernetes.io/projected/acc4e8e3-089c-4bb1-935a-f1597f0e973c-kube-api-access-p79zs\") on node \"crc\" DevicePath \"\"" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.181568 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" event={"ID":"acc4e8e3-089c-4bb1-935a-f1597f0e973c","Type":"ContainerDied","Data":"9ecc1a753635f1f1253a739b254163690c95dcc2492b864e6673f0b492dd96a8"} Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.181614 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ecc1a753635f1f1253a739b254163690c95dcc2492b864e6673f0b492dd96a8" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.181617 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.273745 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf"] Dec 10 01:11:18 crc kubenswrapper[4884]: E1210 01:11:18.274812 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acc4e8e3-089c-4bb1-935a-f1597f0e973c" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.274838 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="acc4e8e3-089c-4bb1-935a-f1597f0e973c" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.275108 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="acc4e8e3-089c-4bb1-935a-f1597f0e973c" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.276239 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.283090 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.283382 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.283811 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.283999 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.284059 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.284398 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.284701 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.285923 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.286770 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:11:18 crc kubenswrapper[4884]: E1210 01:11:18.287232 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.288306 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf"] Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.384547 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.385031 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.385186 4884 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.385352 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.385587 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.385819 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.386048 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.386247 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.386421 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.386583 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.386732 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.386940 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.387124 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x27zq\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-kube-api-access-x27zq\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.488552 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.488943 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.489019 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.489093 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.489175 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.489305 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.489470 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.489569 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.489655 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.489732 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.489810 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 
crc kubenswrapper[4884]: I1210 01:11:18.489906 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.489991 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x27zq\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-kube-api-access-x27zq\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.500197 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.502012 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.509948 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-telemetry-power-monitoring-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.516286 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.517275 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.519097 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.526253 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.526786 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.527173 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.528835 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.536110 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x27zq\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-kube-api-access-x27zq\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.539954 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 01:11:18.541602 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-swpbf\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:18 crc kubenswrapper[4884]: I1210 
01:11:18.614332 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:11:19 crc kubenswrapper[4884]: I1210 01:11:19.180832 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf"] Dec 10 01:11:20 crc kubenswrapper[4884]: I1210 01:11:20.224399 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" event={"ID":"d2403845-7f66-49c3-8f0b-fd8bad801b5a","Type":"ContainerStarted","Data":"58cd680b281207bdd19aecd0360ba33027d3fb310f73d858c4a6db7ad851ef2a"} Dec 10 01:11:20 crc kubenswrapper[4884]: E1210 01:11:20.297153 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:11:21 crc kubenswrapper[4884]: I1210 01:11:21.240870 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" event={"ID":"d2403845-7f66-49c3-8f0b-fd8bad801b5a","Type":"ContainerStarted","Data":"8ade544cf5cc761ef81151762ab43ec885100c2691ffec90c7455eb934da8545"} Dec 10 01:11:21 crc kubenswrapper[4884]: I1210 01:11:21.267469 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" podStartSLOduration=1.9862490670000001 podStartE2EDuration="3.26745106s" podCreationTimestamp="2025-12-10 01:11:18 +0000 UTC" firstStartedPulling="2025-12-10 01:11:19.190054235 +0000 UTC m=+2452.268011352" lastFinishedPulling="2025-12-10 01:11:20.471256188 +0000 UTC m=+2453.549213345" observedRunningTime="2025-12-10 01:11:21.261570741 +0000 UTC m=+2454.339527868" watchObservedRunningTime="2025-12-10 01:11:21.26745106 +0000 UTC m=+2454.345408177" Dec 10 01:11:27 crc kubenswrapper[4884]: E1210 01:11:27.324926 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:11:32 crc kubenswrapper[4884]: I1210 01:11:32.287285 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:11:32 crc kubenswrapper[4884]: E1210 01:11:32.288016 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:11:33 crc kubenswrapper[4884]: E1210 01:11:33.290095 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" 
podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:11:40 crc kubenswrapper[4884]: E1210 01:11:40.290897 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:11:43 crc kubenswrapper[4884]: I1210 01:11:43.287764 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:11:43 crc kubenswrapper[4884]: E1210 01:11:43.288698 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:11:44 crc kubenswrapper[4884]: E1210 01:11:44.292040 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:11:52 crc kubenswrapper[4884]: E1210 01:11:52.289637 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:11:54 crc kubenswrapper[4884]: I1210 01:11:54.297952 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:11:54 crc kubenswrapper[4884]: E1210 01:11:54.299049 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:11:55 crc kubenswrapper[4884]: E1210 01:11:55.289160 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:11:59 crc kubenswrapper[4884]: I1210 01:11:59.711607 4884 generic.go:334] "Generic (PLEG): container finished" podID="d2403845-7f66-49c3-8f0b-fd8bad801b5a" containerID="8ade544cf5cc761ef81151762ab43ec885100c2691ffec90c7455eb934da8545" exitCode=0 Dec 10 01:11:59 crc kubenswrapper[4884]: I1210 01:11:59.711698 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" 
event={"ID":"d2403845-7f66-49c3-8f0b-fd8bad801b5a","Type":"ContainerDied","Data":"8ade544cf5cc761ef81151762ab43ec885100c2691ffec90c7455eb934da8545"} Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.230729 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.401831 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-inventory\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.401896 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-bootstrap-combined-ca-bundle\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.401968 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x27zq\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-kube-api-access-x27zq\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.402032 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.402068 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-telemetry-combined-ca-bundle\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.402141 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-telemetry-power-monitoring-combined-ca-bundle\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.402159 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-ovn-combined-ca-bundle\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.402201 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-ovn-default-certs-0\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.402223 4884 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-libvirt-combined-ca-bundle\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.402243 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.402317 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-repo-setup-combined-ca-bundle\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.402360 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.402391 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-ssh-key\") pod \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\" (UID: \"d2403845-7f66-49c3-8f0b-fd8bad801b5a\") " Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.408100 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.408464 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-telemetry-power-monitoring-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-power-monitoring-combined-ca-bundle") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "telemetry-power-monitoring-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.409132 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "libvirt-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.409175 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.409221 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.409808 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.411756 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.412043 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.412628 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.412810 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.426145 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-kube-api-access-x27zq" (OuterVolumeSpecName: "kube-api-access-x27zq") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "kube-api-access-x27zq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.435322 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-inventory" (OuterVolumeSpecName: "inventory") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.440424 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d2403845-7f66-49c3-8f0b-fd8bad801b5a" (UID: "d2403845-7f66-49c3-8f0b-fd8bad801b5a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505075 4884 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-telemetry-power-monitoring-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505123 4884 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505139 4884 reconciler_common.go:293] "Volume detached for volume \"telemetry-power-monitoring-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-telemetry-power-monitoring-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505152 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505168 4884 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505183 4884 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505196 4884 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 
10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505209 4884 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505221 4884 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505233 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505245 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505256 4884 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2403845-7f66-49c3-8f0b-fd8bad801b5a-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.505268 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x27zq\" (UniqueName: \"kubernetes.io/projected/d2403845-7f66-49c3-8f0b-fd8bad801b5a-kube-api-access-x27zq\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.763629 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" event={"ID":"d2403845-7f66-49c3-8f0b-fd8bad801b5a","Type":"ContainerDied","Data":"58cd680b281207bdd19aecd0360ba33027d3fb310f73d858c4a6db7ad851ef2a"} Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.763962 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="58cd680b281207bdd19aecd0360ba33027d3fb310f73d858c4a6db7ad851ef2a" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.763737 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-swpbf" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.850250 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf"] Dec 10 01:12:01 crc kubenswrapper[4884]: E1210 01:12:01.850671 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2403845-7f66-49c3-8f0b-fd8bad801b5a" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.850690 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2403845-7f66-49c3-8f0b-fd8bad801b5a" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.850928 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2403845-7f66-49c3-8f0b-fd8bad801b5a" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.851671 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.856903 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.856958 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.857131 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.857153 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.857355 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:12:01 crc kubenswrapper[4884]: I1210 01:12:01.864915 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf"] Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.014468 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.014568 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/14b622f8-c484-476b-8024-8c1afeef15c2-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.014673 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.014703 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48fxf\" (UniqueName: \"kubernetes.io/projected/14b622f8-c484-476b-8024-8c1afeef15c2-kube-api-access-48fxf\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.014774 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.117191 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.117298 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/14b622f8-c484-476b-8024-8c1afeef15c2-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.117397 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.117448 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48fxf\" (UniqueName: \"kubernetes.io/projected/14b622f8-c484-476b-8024-8c1afeef15c2-kube-api-access-48fxf\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.117513 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.118450 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/14b622f8-c484-476b-8024-8c1afeef15c2-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.122792 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.125075 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.127318 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.141398 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48fxf\" (UniqueName: \"kubernetes.io/projected/14b622f8-c484-476b-8024-8c1afeef15c2-kube-api-access-48fxf\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-h92sf\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.179741 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:12:02 crc kubenswrapper[4884]: I1210 01:12:02.829137 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf"] Dec 10 01:12:02 crc kubenswrapper[4884]: W1210 01:12:02.836682 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14b622f8_c484_476b_8024_8c1afeef15c2.slice/crio-8823aaf8e2b881683c3565dbacf4e77b021b7daed39cb2ebda0ec4c401aec9c9 WatchSource:0}: Error finding container 8823aaf8e2b881683c3565dbacf4e77b021b7daed39cb2ebda0ec4c401aec9c9: Status 404 returned error can't find the container with id 8823aaf8e2b881683c3565dbacf4e77b021b7daed39cb2ebda0ec4c401aec9c9 Dec 10 01:12:03 crc kubenswrapper[4884]: I1210 01:12:03.792590 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" event={"ID":"14b622f8-c484-476b-8024-8c1afeef15c2","Type":"ContainerStarted","Data":"4ac664e610e32f8a6c1344f54a15968b0088ccad4a75b0d4e1fe9331f5574c34"} Dec 10 01:12:03 crc kubenswrapper[4884]: I1210 01:12:03.793348 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" event={"ID":"14b622f8-c484-476b-8024-8c1afeef15c2","Type":"ContainerStarted","Data":"8823aaf8e2b881683c3565dbacf4e77b021b7daed39cb2ebda0ec4c401aec9c9"} Dec 10 01:12:03 crc kubenswrapper[4884]: I1210 01:12:03.813775 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" podStartSLOduration=2.248241652 podStartE2EDuration="2.81375866s" podCreationTimestamp="2025-12-10 01:12:01 +0000 UTC" firstStartedPulling="2025-12-10 01:12:02.842459209 +0000 UTC m=+2495.920416326" lastFinishedPulling="2025-12-10 01:12:03.407976207 +0000 UTC m=+2496.485933334" observedRunningTime="2025-12-10 01:12:03.813075882 +0000 UTC m=+2496.891033029" watchObservedRunningTime="2025-12-10 01:12:03.81375866 +0000 UTC m=+2496.891715787" Dec 10 01:12:06 crc kubenswrapper[4884]: I1210 01:12:06.287283 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:12:06 crc kubenswrapper[4884]: E1210 01:12:06.287858 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:12:07 crc kubenswrapper[4884]: E1210 01:12:07.309797 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:12:09 crc kubenswrapper[4884]: E1210 01:12:09.290135 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.482710 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wt8wg"] Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.486711 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.498628 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wt8wg"] Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.579328 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d98d64df-3ea1-4df4-ad05-397c29b53117-catalog-content\") pod \"redhat-marketplace-wt8wg\" (UID: \"d98d64df-3ea1-4df4-ad05-397c29b53117\") " pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.579375 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frgbm\" (UniqueName: \"kubernetes.io/projected/d98d64df-3ea1-4df4-ad05-397c29b53117-kube-api-access-frgbm\") pod \"redhat-marketplace-wt8wg\" (UID: \"d98d64df-3ea1-4df4-ad05-397c29b53117\") " pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.579425 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d98d64df-3ea1-4df4-ad05-397c29b53117-utilities\") pod \"redhat-marketplace-wt8wg\" (UID: \"d98d64df-3ea1-4df4-ad05-397c29b53117\") " pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.681242 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d98d64df-3ea1-4df4-ad05-397c29b53117-catalog-content\") pod \"redhat-marketplace-wt8wg\" (UID: \"d98d64df-3ea1-4df4-ad05-397c29b53117\") " pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.681293 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frgbm\" (UniqueName: \"kubernetes.io/projected/d98d64df-3ea1-4df4-ad05-397c29b53117-kube-api-access-frgbm\") pod \"redhat-marketplace-wt8wg\" (UID: \"d98d64df-3ea1-4df4-ad05-397c29b53117\") " pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.681355 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d98d64df-3ea1-4df4-ad05-397c29b53117-utilities\") pod \"redhat-marketplace-wt8wg\" (UID: \"d98d64df-3ea1-4df4-ad05-397c29b53117\") " 
pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.681973 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d98d64df-3ea1-4df4-ad05-397c29b53117-utilities\") pod \"redhat-marketplace-wt8wg\" (UID: \"d98d64df-3ea1-4df4-ad05-397c29b53117\") " pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.681997 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d98d64df-3ea1-4df4-ad05-397c29b53117-catalog-content\") pod \"redhat-marketplace-wt8wg\" (UID: \"d98d64df-3ea1-4df4-ad05-397c29b53117\") " pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.704694 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frgbm\" (UniqueName: \"kubernetes.io/projected/d98d64df-3ea1-4df4-ad05-397c29b53117-kube-api-access-frgbm\") pod \"redhat-marketplace-wt8wg\" (UID: \"d98d64df-3ea1-4df4-ad05-397c29b53117\") " pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:17 crc kubenswrapper[4884]: I1210 01:12:17.827382 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:18 crc kubenswrapper[4884]: I1210 01:12:18.332912 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wt8wg"] Dec 10 01:12:18 crc kubenswrapper[4884]: W1210 01:12:18.342795 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd98d64df_3ea1_4df4_ad05_397c29b53117.slice/crio-9dbd0cc17bb7fac132d1829988d649f2c60a8d01bd7f3fd10df7dc0f35a46ae7 WatchSource:0}: Error finding container 9dbd0cc17bb7fac132d1829988d649f2c60a8d01bd7f3fd10df7dc0f35a46ae7: Status 404 returned error can't find the container with id 9dbd0cc17bb7fac132d1829988d649f2c60a8d01bd7f3fd10df7dc0f35a46ae7 Dec 10 01:12:18 crc kubenswrapper[4884]: I1210 01:12:18.982179 4884 generic.go:334] "Generic (PLEG): container finished" podID="d98d64df-3ea1-4df4-ad05-397c29b53117" containerID="db947d6676d26af2a74a1780d3c2aff601727a2dc5dd3b9ea985db82ca1225ea" exitCode=0 Dec 10 01:12:18 crc kubenswrapper[4884]: I1210 01:12:18.982338 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wt8wg" event={"ID":"d98d64df-3ea1-4df4-ad05-397c29b53117","Type":"ContainerDied","Data":"db947d6676d26af2a74a1780d3c2aff601727a2dc5dd3b9ea985db82ca1225ea"} Dec 10 01:12:18 crc kubenswrapper[4884]: I1210 01:12:18.982555 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wt8wg" event={"ID":"d98d64df-3ea1-4df4-ad05-397c29b53117","Type":"ContainerStarted","Data":"9dbd0cc17bb7fac132d1829988d649f2c60a8d01bd7f3fd10df7dc0f35a46ae7"} Dec 10 01:12:19 crc kubenswrapper[4884]: I1210 01:12:19.994139 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wt8wg" event={"ID":"d98d64df-3ea1-4df4-ad05-397c29b53117","Type":"ContainerStarted","Data":"594741802bd59045f313e85cdc6ea72bf30bece82715460c5fe1e8eb40366772"} Dec 10 01:12:20 crc kubenswrapper[4884]: I1210 01:12:20.287194 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:12:20 crc 
kubenswrapper[4884]: E1210 01:12:20.287736 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:12:21 crc kubenswrapper[4884]: I1210 01:12:21.011982 4884 generic.go:334] "Generic (PLEG): container finished" podID="d98d64df-3ea1-4df4-ad05-397c29b53117" containerID="594741802bd59045f313e85cdc6ea72bf30bece82715460c5fe1e8eb40366772" exitCode=0 Dec 10 01:12:21 crc kubenswrapper[4884]: I1210 01:12:21.012097 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wt8wg" event={"ID":"d98d64df-3ea1-4df4-ad05-397c29b53117","Type":"ContainerDied","Data":"594741802bd59045f313e85cdc6ea72bf30bece82715460c5fe1e8eb40366772"} Dec 10 01:12:21 crc kubenswrapper[4884]: E1210 01:12:21.290421 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:12:21 crc kubenswrapper[4884]: E1210 01:12:21.290471 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:12:22 crc kubenswrapper[4884]: I1210 01:12:22.025562 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wt8wg" event={"ID":"d98d64df-3ea1-4df4-ad05-397c29b53117","Type":"ContainerStarted","Data":"f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39"} Dec 10 01:12:22 crc kubenswrapper[4884]: I1210 01:12:22.046247 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wt8wg" podStartSLOduration=2.433828712 podStartE2EDuration="5.046224956s" podCreationTimestamp="2025-12-10 01:12:17 +0000 UTC" firstStartedPulling="2025-12-10 01:12:18.986122043 +0000 UTC m=+2512.064079160" lastFinishedPulling="2025-12-10 01:12:21.598518277 +0000 UTC m=+2514.676475404" observedRunningTime="2025-12-10 01:12:22.04342352 +0000 UTC m=+2515.121380647" watchObservedRunningTime="2025-12-10 01:12:22.046224956 +0000 UTC m=+2515.124182073" Dec 10 01:12:27 crc kubenswrapper[4884]: I1210 01:12:27.827667 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:27 crc kubenswrapper[4884]: I1210 01:12:27.829583 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:27 crc kubenswrapper[4884]: I1210 01:12:27.878624 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:28 crc kubenswrapper[4884]: I1210 01:12:28.170143 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:28 crc kubenswrapper[4884]: I1210 01:12:28.230769 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wt8wg"] Dec 10 01:12:30 crc kubenswrapper[4884]: I1210 01:12:30.126466 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wt8wg" podUID="d98d64df-3ea1-4df4-ad05-397c29b53117" containerName="registry-server" containerID="cri-o://f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39" gracePeriod=2 Dec 10 01:12:30 crc kubenswrapper[4884]: I1210 01:12:30.713423 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:30 crc kubenswrapper[4884]: I1210 01:12:30.800931 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d98d64df-3ea1-4df4-ad05-397c29b53117-catalog-content\") pod \"d98d64df-3ea1-4df4-ad05-397c29b53117\" (UID: \"d98d64df-3ea1-4df4-ad05-397c29b53117\") " Dec 10 01:12:30 crc kubenswrapper[4884]: I1210 01:12:30.801895 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d98d64df-3ea1-4df4-ad05-397c29b53117-utilities\") pod \"d98d64df-3ea1-4df4-ad05-397c29b53117\" (UID: \"d98d64df-3ea1-4df4-ad05-397c29b53117\") " Dec 10 01:12:30 crc kubenswrapper[4884]: I1210 01:12:30.802235 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frgbm\" (UniqueName: \"kubernetes.io/projected/d98d64df-3ea1-4df4-ad05-397c29b53117-kube-api-access-frgbm\") pod \"d98d64df-3ea1-4df4-ad05-397c29b53117\" (UID: \"d98d64df-3ea1-4df4-ad05-397c29b53117\") " Dec 10 01:12:30 crc kubenswrapper[4884]: I1210 01:12:30.802873 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d98d64df-3ea1-4df4-ad05-397c29b53117-utilities" (OuterVolumeSpecName: "utilities") pod "d98d64df-3ea1-4df4-ad05-397c29b53117" (UID: "d98d64df-3ea1-4df4-ad05-397c29b53117"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:12:30 crc kubenswrapper[4884]: I1210 01:12:30.813680 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d98d64df-3ea1-4df4-ad05-397c29b53117-kube-api-access-frgbm" (OuterVolumeSpecName: "kube-api-access-frgbm") pod "d98d64df-3ea1-4df4-ad05-397c29b53117" (UID: "d98d64df-3ea1-4df4-ad05-397c29b53117"). InnerVolumeSpecName "kube-api-access-frgbm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:12:30 crc kubenswrapper[4884]: I1210 01:12:30.828269 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d98d64df-3ea1-4df4-ad05-397c29b53117-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d98d64df-3ea1-4df4-ad05-397c29b53117" (UID: "d98d64df-3ea1-4df4-ad05-397c29b53117"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:12:30 crc kubenswrapper[4884]: I1210 01:12:30.905160 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frgbm\" (UniqueName: \"kubernetes.io/projected/d98d64df-3ea1-4df4-ad05-397c29b53117-kube-api-access-frgbm\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:30 crc kubenswrapper[4884]: I1210 01:12:30.905207 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d98d64df-3ea1-4df4-ad05-397c29b53117-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:30 crc kubenswrapper[4884]: I1210 01:12:30.905224 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d98d64df-3ea1-4df4-ad05-397c29b53117-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.137264 4884 generic.go:334] "Generic (PLEG): container finished" podID="d98d64df-3ea1-4df4-ad05-397c29b53117" containerID="f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39" exitCode=0 Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.137302 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wt8wg" event={"ID":"d98d64df-3ea1-4df4-ad05-397c29b53117","Type":"ContainerDied","Data":"f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39"} Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.137328 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wt8wg" event={"ID":"d98d64df-3ea1-4df4-ad05-397c29b53117","Type":"ContainerDied","Data":"9dbd0cc17bb7fac132d1829988d649f2c60a8d01bd7f3fd10df7dc0f35a46ae7"} Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.137344 4884 scope.go:117] "RemoveContainer" containerID="f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39" Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.137470 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wt8wg" Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.169423 4884 scope.go:117] "RemoveContainer" containerID="594741802bd59045f313e85cdc6ea72bf30bece82715460c5fe1e8eb40366772" Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.187731 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wt8wg"] Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.194411 4884 scope.go:117] "RemoveContainer" containerID="db947d6676d26af2a74a1780d3c2aff601727a2dc5dd3b9ea985db82ca1225ea" Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.214816 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wt8wg"] Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.242109 4884 scope.go:117] "RemoveContainer" containerID="f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39" Dec 10 01:12:31 crc kubenswrapper[4884]: E1210 01:12:31.243976 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39\": container with ID starting with f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39 not found: ID does not exist" containerID="f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39" Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.244009 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39"} err="failed to get container status \"f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39\": rpc error: code = NotFound desc = could not find container \"f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39\": container with ID starting with f63c4a20cbb34a276b78989ef2199a8ec950672e5958319b5281d149d00ecf39 not found: ID does not exist" Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.244030 4884 scope.go:117] "RemoveContainer" containerID="594741802bd59045f313e85cdc6ea72bf30bece82715460c5fe1e8eb40366772" Dec 10 01:12:31 crc kubenswrapper[4884]: E1210 01:12:31.244343 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"594741802bd59045f313e85cdc6ea72bf30bece82715460c5fe1e8eb40366772\": container with ID starting with 594741802bd59045f313e85cdc6ea72bf30bece82715460c5fe1e8eb40366772 not found: ID does not exist" containerID="594741802bd59045f313e85cdc6ea72bf30bece82715460c5fe1e8eb40366772" Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.244368 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"594741802bd59045f313e85cdc6ea72bf30bece82715460c5fe1e8eb40366772"} err="failed to get container status \"594741802bd59045f313e85cdc6ea72bf30bece82715460c5fe1e8eb40366772\": rpc error: code = NotFound desc = could not find container \"594741802bd59045f313e85cdc6ea72bf30bece82715460c5fe1e8eb40366772\": container with ID starting with 594741802bd59045f313e85cdc6ea72bf30bece82715460c5fe1e8eb40366772 not found: ID does not exist" Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.244382 4884 scope.go:117] "RemoveContainer" containerID="db947d6676d26af2a74a1780d3c2aff601727a2dc5dd3b9ea985db82ca1225ea" Dec 10 01:12:31 crc kubenswrapper[4884]: E1210 01:12:31.244620 4884 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"db947d6676d26af2a74a1780d3c2aff601727a2dc5dd3b9ea985db82ca1225ea\": container with ID starting with db947d6676d26af2a74a1780d3c2aff601727a2dc5dd3b9ea985db82ca1225ea not found: ID does not exist" containerID="db947d6676d26af2a74a1780d3c2aff601727a2dc5dd3b9ea985db82ca1225ea" Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.244643 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db947d6676d26af2a74a1780d3c2aff601727a2dc5dd3b9ea985db82ca1225ea"} err="failed to get container status \"db947d6676d26af2a74a1780d3c2aff601727a2dc5dd3b9ea985db82ca1225ea\": rpc error: code = NotFound desc = could not find container \"db947d6676d26af2a74a1780d3c2aff601727a2dc5dd3b9ea985db82ca1225ea\": container with ID starting with db947d6676d26af2a74a1780d3c2aff601727a2dc5dd3b9ea985db82ca1225ea not found: ID does not exist" Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.287948 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:12:31 crc kubenswrapper[4884]: E1210 01:12:31.288226 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:12:31 crc kubenswrapper[4884]: I1210 01:12:31.330949 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d98d64df-3ea1-4df4-ad05-397c29b53117" path="/var/lib/kubelet/pods/d98d64df-3ea1-4df4-ad05-397c29b53117/volumes" Dec 10 01:12:35 crc kubenswrapper[4884]: E1210 01:12:35.291027 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:12:36 crc kubenswrapper[4884]: E1210 01:12:36.288764 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:12:44 crc kubenswrapper[4884]: I1210 01:12:44.287722 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:12:44 crc kubenswrapper[4884]: E1210 01:12:44.288712 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:12:46 crc kubenswrapper[4884]: E1210 01:12:46.292477 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:12:51 crc kubenswrapper[4884]: E1210 01:12:51.291346 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:12:54 crc kubenswrapper[4884]: I1210 01:12:54.914361 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xjhgl"] Dec 10 01:12:54 crc kubenswrapper[4884]: E1210 01:12:54.915547 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d98d64df-3ea1-4df4-ad05-397c29b53117" containerName="extract-content" Dec 10 01:12:54 crc kubenswrapper[4884]: I1210 01:12:54.915565 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d98d64df-3ea1-4df4-ad05-397c29b53117" containerName="extract-content" Dec 10 01:12:54 crc kubenswrapper[4884]: E1210 01:12:54.915584 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d98d64df-3ea1-4df4-ad05-397c29b53117" containerName="extract-utilities" Dec 10 01:12:54 crc kubenswrapper[4884]: I1210 01:12:54.915592 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d98d64df-3ea1-4df4-ad05-397c29b53117" containerName="extract-utilities" Dec 10 01:12:54 crc kubenswrapper[4884]: E1210 01:12:54.915611 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d98d64df-3ea1-4df4-ad05-397c29b53117" containerName="registry-server" Dec 10 01:12:54 crc kubenswrapper[4884]: I1210 01:12:54.915619 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="d98d64df-3ea1-4df4-ad05-397c29b53117" containerName="registry-server" Dec 10 01:12:54 crc kubenswrapper[4884]: I1210 01:12:54.915851 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="d98d64df-3ea1-4df4-ad05-397c29b53117" containerName="registry-server" Dec 10 01:12:54 crc kubenswrapper[4884]: I1210 01:12:54.918077 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:12:54 crc kubenswrapper[4884]: I1210 01:12:54.930287 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xjhgl"] Dec 10 01:12:54 crc kubenswrapper[4884]: I1210 01:12:54.959016 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e271cf19-0730-4ad2-9593-9b0eee408f11-catalog-content\") pod \"redhat-operators-xjhgl\" (UID: \"e271cf19-0730-4ad2-9593-9b0eee408f11\") " pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:12:54 crc kubenswrapper[4884]: I1210 01:12:54.959790 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tz5zz\" (UniqueName: \"kubernetes.io/projected/e271cf19-0730-4ad2-9593-9b0eee408f11-kube-api-access-tz5zz\") pod \"redhat-operators-xjhgl\" (UID: \"e271cf19-0730-4ad2-9593-9b0eee408f11\") " pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:12:54 crc kubenswrapper[4884]: I1210 01:12:54.960062 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e271cf19-0730-4ad2-9593-9b0eee408f11-utilities\") pod \"redhat-operators-xjhgl\" (UID: \"e271cf19-0730-4ad2-9593-9b0eee408f11\") " pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:12:55 crc kubenswrapper[4884]: I1210 01:12:55.062135 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tz5zz\" (UniqueName: \"kubernetes.io/projected/e271cf19-0730-4ad2-9593-9b0eee408f11-kube-api-access-tz5zz\") pod \"redhat-operators-xjhgl\" (UID: \"e271cf19-0730-4ad2-9593-9b0eee408f11\") " pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:12:55 crc kubenswrapper[4884]: I1210 01:12:55.062267 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e271cf19-0730-4ad2-9593-9b0eee408f11-utilities\") pod \"redhat-operators-xjhgl\" (UID: \"e271cf19-0730-4ad2-9593-9b0eee408f11\") " pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:12:55 crc kubenswrapper[4884]: I1210 01:12:55.062331 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e271cf19-0730-4ad2-9593-9b0eee408f11-catalog-content\") pod \"redhat-operators-xjhgl\" (UID: \"e271cf19-0730-4ad2-9593-9b0eee408f11\") " pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:12:55 crc kubenswrapper[4884]: I1210 01:12:55.062790 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e271cf19-0730-4ad2-9593-9b0eee408f11-utilities\") pod \"redhat-operators-xjhgl\" (UID: \"e271cf19-0730-4ad2-9593-9b0eee408f11\") " pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:12:55 crc kubenswrapper[4884]: I1210 01:12:55.062859 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e271cf19-0730-4ad2-9593-9b0eee408f11-catalog-content\") pod \"redhat-operators-xjhgl\" (UID: \"e271cf19-0730-4ad2-9593-9b0eee408f11\") " pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:12:55 crc kubenswrapper[4884]: I1210 01:12:55.095546 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-tz5zz\" (UniqueName: \"kubernetes.io/projected/e271cf19-0730-4ad2-9593-9b0eee408f11-kube-api-access-tz5zz\") pod \"redhat-operators-xjhgl\" (UID: \"e271cf19-0730-4ad2-9593-9b0eee408f11\") " pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:12:55 crc kubenswrapper[4884]: I1210 01:12:55.243750 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:12:55 crc kubenswrapper[4884]: I1210 01:12:55.772166 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xjhgl"] Dec 10 01:12:56 crc kubenswrapper[4884]: I1210 01:12:56.407111 4884 generic.go:334] "Generic (PLEG): container finished" podID="e271cf19-0730-4ad2-9593-9b0eee408f11" containerID="cde06f00e31418d83d6de23fe63c27ab228ac61e0fc3d1dc3072c74c247356b8" exitCode=0 Dec 10 01:12:56 crc kubenswrapper[4884]: I1210 01:12:56.407189 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjhgl" event={"ID":"e271cf19-0730-4ad2-9593-9b0eee408f11","Type":"ContainerDied","Data":"cde06f00e31418d83d6de23fe63c27ab228ac61e0fc3d1dc3072c74c247356b8"} Dec 10 01:12:56 crc kubenswrapper[4884]: I1210 01:12:56.407379 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjhgl" event={"ID":"e271cf19-0730-4ad2-9593-9b0eee408f11","Type":"ContainerStarted","Data":"e8798f4ca195b0bf28ecfd146b8b187a3291d1dabb95da5fd70d736b25414e20"} Dec 10 01:12:57 crc kubenswrapper[4884]: I1210 01:12:57.302127 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:12:58 crc kubenswrapper[4884]: I1210 01:12:58.429170 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjhgl" event={"ID":"e271cf19-0730-4ad2-9593-9b0eee408f11","Type":"ContainerStarted","Data":"ac96e83c1942fa0aa72a3bbb802aef590c8c4620cb26bb4ea099413986a71d56"} Dec 10 01:12:58 crc kubenswrapper[4884]: I1210 01:12:58.432751 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"63276cf5fac278ac47ca3f840ee652628693680ece5a047e41350771c1712289"} Dec 10 01:13:01 crc kubenswrapper[4884]: E1210 01:13:01.289852 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:13:02 crc kubenswrapper[4884]: I1210 01:13:02.494948 4884 generic.go:334] "Generic (PLEG): container finished" podID="e271cf19-0730-4ad2-9593-9b0eee408f11" containerID="ac96e83c1942fa0aa72a3bbb802aef590c8c4620cb26bb4ea099413986a71d56" exitCode=0 Dec 10 01:13:02 crc kubenswrapper[4884]: I1210 01:13:02.495001 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjhgl" event={"ID":"e271cf19-0730-4ad2-9593-9b0eee408f11","Type":"ContainerDied","Data":"ac96e83c1942fa0aa72a3bbb802aef590c8c4620cb26bb4ea099413986a71d56"} Dec 10 01:13:04 crc kubenswrapper[4884]: I1210 01:13:04.517598 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjhgl" 
event={"ID":"e271cf19-0730-4ad2-9593-9b0eee408f11","Type":"ContainerStarted","Data":"053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751"} Dec 10 01:13:04 crc kubenswrapper[4884]: I1210 01:13:04.543544 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xjhgl" podStartSLOduration=3.566504136 podStartE2EDuration="10.543526864s" podCreationTimestamp="2025-12-10 01:12:54 +0000 UTC" firstStartedPulling="2025-12-10 01:12:56.409552069 +0000 UTC m=+2549.487509186" lastFinishedPulling="2025-12-10 01:13:03.386574797 +0000 UTC m=+2556.464531914" observedRunningTime="2025-12-10 01:13:04.535226569 +0000 UTC m=+2557.613183686" watchObservedRunningTime="2025-12-10 01:13:04.543526864 +0000 UTC m=+2557.621483981" Dec 10 01:13:05 crc kubenswrapper[4884]: I1210 01:13:05.244480 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:13:05 crc kubenswrapper[4884]: I1210 01:13:05.244531 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:13:05 crc kubenswrapper[4884]: E1210 01:13:05.289269 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:13:06 crc kubenswrapper[4884]: I1210 01:13:06.301471 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xjhgl" podUID="e271cf19-0730-4ad2-9593-9b0eee408f11" containerName="registry-server" probeResult="failure" output=< Dec 10 01:13:06 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 01:13:06 crc kubenswrapper[4884]: > Dec 10 01:13:14 crc kubenswrapper[4884]: E1210 01:13:14.289882 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:13:15 crc kubenswrapper[4884]: I1210 01:13:15.343641 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:13:15 crc kubenswrapper[4884]: I1210 01:13:15.424916 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:13:15 crc kubenswrapper[4884]: I1210 01:13:15.588117 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xjhgl"] Dec 10 01:13:16 crc kubenswrapper[4884]: I1210 01:13:16.673756 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xjhgl" podUID="e271cf19-0730-4ad2-9593-9b0eee408f11" containerName="registry-server" containerID="cri-o://053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751" gracePeriod=2 Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.210257 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.376983 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e271cf19-0730-4ad2-9593-9b0eee408f11-utilities\") pod \"e271cf19-0730-4ad2-9593-9b0eee408f11\" (UID: \"e271cf19-0730-4ad2-9593-9b0eee408f11\") " Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.377226 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tz5zz\" (UniqueName: \"kubernetes.io/projected/e271cf19-0730-4ad2-9593-9b0eee408f11-kube-api-access-tz5zz\") pod \"e271cf19-0730-4ad2-9593-9b0eee408f11\" (UID: \"e271cf19-0730-4ad2-9593-9b0eee408f11\") " Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.377300 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e271cf19-0730-4ad2-9593-9b0eee408f11-catalog-content\") pod \"e271cf19-0730-4ad2-9593-9b0eee408f11\" (UID: \"e271cf19-0730-4ad2-9593-9b0eee408f11\") " Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.377910 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e271cf19-0730-4ad2-9593-9b0eee408f11-utilities" (OuterVolumeSpecName: "utilities") pod "e271cf19-0730-4ad2-9593-9b0eee408f11" (UID: "e271cf19-0730-4ad2-9593-9b0eee408f11"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.378115 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e271cf19-0730-4ad2-9593-9b0eee408f11-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.382419 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e271cf19-0730-4ad2-9593-9b0eee408f11-kube-api-access-tz5zz" (OuterVolumeSpecName: "kube-api-access-tz5zz") pod "e271cf19-0730-4ad2-9593-9b0eee408f11" (UID: "e271cf19-0730-4ad2-9593-9b0eee408f11"). InnerVolumeSpecName "kube-api-access-tz5zz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.481054 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tz5zz\" (UniqueName: \"kubernetes.io/projected/e271cf19-0730-4ad2-9593-9b0eee408f11-kube-api-access-tz5zz\") on node \"crc\" DevicePath \"\"" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.487545 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e271cf19-0730-4ad2-9593-9b0eee408f11-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e271cf19-0730-4ad2-9593-9b0eee408f11" (UID: "e271cf19-0730-4ad2-9593-9b0eee408f11"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.583315 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e271cf19-0730-4ad2-9593-9b0eee408f11-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.692368 4884 generic.go:334] "Generic (PLEG): container finished" podID="e271cf19-0730-4ad2-9593-9b0eee408f11" containerID="053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751" exitCode=0 Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.692418 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjhgl" event={"ID":"e271cf19-0730-4ad2-9593-9b0eee408f11","Type":"ContainerDied","Data":"053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751"} Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.692462 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xjhgl" event={"ID":"e271cf19-0730-4ad2-9593-9b0eee408f11","Type":"ContainerDied","Data":"e8798f4ca195b0bf28ecfd146b8b187a3291d1dabb95da5fd70d736b25414e20"} Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.692484 4884 scope.go:117] "RemoveContainer" containerID="053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.692499 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xjhgl" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.738155 4884 scope.go:117] "RemoveContainer" containerID="ac96e83c1942fa0aa72a3bbb802aef590c8c4620cb26bb4ea099413986a71d56" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.752388 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xjhgl"] Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.764523 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xjhgl"] Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.791007 4884 scope.go:117] "RemoveContainer" containerID="cde06f00e31418d83d6de23fe63c27ab228ac61e0fc3d1dc3072c74c247356b8" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.834372 4884 scope.go:117] "RemoveContainer" containerID="053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751" Dec 10 01:13:17 crc kubenswrapper[4884]: E1210 01:13:17.834965 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751\": container with ID starting with 053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751 not found: ID does not exist" containerID="053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.835056 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751"} err="failed to get container status \"053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751\": rpc error: code = NotFound desc = could not find container \"053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751\": container with ID starting with 053470b6b1e5517846ae976f8110dbb4253a66a1a8dd9cd8099ab1aa83456751 not found: ID does not exist" Dec 10 01:13:17 crc 
kubenswrapper[4884]: I1210 01:13:17.835112 4884 scope.go:117] "RemoveContainer" containerID="ac96e83c1942fa0aa72a3bbb802aef590c8c4620cb26bb4ea099413986a71d56" Dec 10 01:13:17 crc kubenswrapper[4884]: E1210 01:13:17.835790 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac96e83c1942fa0aa72a3bbb802aef590c8c4620cb26bb4ea099413986a71d56\": container with ID starting with ac96e83c1942fa0aa72a3bbb802aef590c8c4620cb26bb4ea099413986a71d56 not found: ID does not exist" containerID="ac96e83c1942fa0aa72a3bbb802aef590c8c4620cb26bb4ea099413986a71d56" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.835848 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac96e83c1942fa0aa72a3bbb802aef590c8c4620cb26bb4ea099413986a71d56"} err="failed to get container status \"ac96e83c1942fa0aa72a3bbb802aef590c8c4620cb26bb4ea099413986a71d56\": rpc error: code = NotFound desc = could not find container \"ac96e83c1942fa0aa72a3bbb802aef590c8c4620cb26bb4ea099413986a71d56\": container with ID starting with ac96e83c1942fa0aa72a3bbb802aef590c8c4620cb26bb4ea099413986a71d56 not found: ID does not exist" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.835875 4884 scope.go:117] "RemoveContainer" containerID="cde06f00e31418d83d6de23fe63c27ab228ac61e0fc3d1dc3072c74c247356b8" Dec 10 01:13:17 crc kubenswrapper[4884]: E1210 01:13:17.836203 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cde06f00e31418d83d6de23fe63c27ab228ac61e0fc3d1dc3072c74c247356b8\": container with ID starting with cde06f00e31418d83d6de23fe63c27ab228ac61e0fc3d1dc3072c74c247356b8 not found: ID does not exist" containerID="cde06f00e31418d83d6de23fe63c27ab228ac61e0fc3d1dc3072c74c247356b8" Dec 10 01:13:17 crc kubenswrapper[4884]: I1210 01:13:17.836235 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cde06f00e31418d83d6de23fe63c27ab228ac61e0fc3d1dc3072c74c247356b8"} err="failed to get container status \"cde06f00e31418d83d6de23fe63c27ab228ac61e0fc3d1dc3072c74c247356b8\": rpc error: code = NotFound desc = could not find container \"cde06f00e31418d83d6de23fe63c27ab228ac61e0fc3d1dc3072c74c247356b8\": container with ID starting with cde06f00e31418d83d6de23fe63c27ab228ac61e0fc3d1dc3072c74c247356b8 not found: ID does not exist" Dec 10 01:13:19 crc kubenswrapper[4884]: I1210 01:13:19.299491 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e271cf19-0730-4ad2-9593-9b0eee408f11" path="/var/lib/kubelet/pods/e271cf19-0730-4ad2-9593-9b0eee408f11/volumes" Dec 10 01:13:20 crc kubenswrapper[4884]: E1210 01:13:20.291559 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:13:20 crc kubenswrapper[4884]: I1210 01:13:20.738106 4884 generic.go:334] "Generic (PLEG): container finished" podID="14b622f8-c484-476b-8024-8c1afeef15c2" containerID="4ac664e610e32f8a6c1344f54a15968b0088ccad4a75b0d4e1fe9331f5574c34" exitCode=0 Dec 10 01:13:20 crc kubenswrapper[4884]: I1210 01:13:20.738213 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" 
event={"ID":"14b622f8-c484-476b-8024-8c1afeef15c2","Type":"ContainerDied","Data":"4ac664e610e32f8a6c1344f54a15968b0088ccad4a75b0d4e1fe9331f5574c34"} Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.302947 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.392154 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-ovn-combined-ca-bundle\") pod \"14b622f8-c484-476b-8024-8c1afeef15c2\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.392331 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-inventory\") pod \"14b622f8-c484-476b-8024-8c1afeef15c2\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.392472 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/14b622f8-c484-476b-8024-8c1afeef15c2-ovncontroller-config-0\") pod \"14b622f8-c484-476b-8024-8c1afeef15c2\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.392571 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48fxf\" (UniqueName: \"kubernetes.io/projected/14b622f8-c484-476b-8024-8c1afeef15c2-kube-api-access-48fxf\") pod \"14b622f8-c484-476b-8024-8c1afeef15c2\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.392609 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-ssh-key\") pod \"14b622f8-c484-476b-8024-8c1afeef15c2\" (UID: \"14b622f8-c484-476b-8024-8c1afeef15c2\") " Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.398557 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14b622f8-c484-476b-8024-8c1afeef15c2-kube-api-access-48fxf" (OuterVolumeSpecName: "kube-api-access-48fxf") pod "14b622f8-c484-476b-8024-8c1afeef15c2" (UID: "14b622f8-c484-476b-8024-8c1afeef15c2"). InnerVolumeSpecName "kube-api-access-48fxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.398641 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "14b622f8-c484-476b-8024-8c1afeef15c2" (UID: "14b622f8-c484-476b-8024-8c1afeef15c2"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.426763 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14b622f8-c484-476b-8024-8c1afeef15c2-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "14b622f8-c484-476b-8024-8c1afeef15c2" (UID: "14b622f8-c484-476b-8024-8c1afeef15c2"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.429337 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "14b622f8-c484-476b-8024-8c1afeef15c2" (UID: "14b622f8-c484-476b-8024-8c1afeef15c2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.448846 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-inventory" (OuterVolumeSpecName: "inventory") pod "14b622f8-c484-476b-8024-8c1afeef15c2" (UID: "14b622f8-c484-476b-8024-8c1afeef15c2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.495474 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48fxf\" (UniqueName: \"kubernetes.io/projected/14b622f8-c484-476b-8024-8c1afeef15c2-kube-api-access-48fxf\") on node \"crc\" DevicePath \"\"" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.495512 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.495524 4884 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.495540 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/14b622f8-c484-476b-8024-8c1afeef15c2-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.495553 4884 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/14b622f8-c484-476b-8024-8c1afeef15c2-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.766601 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" event={"ID":"14b622f8-c484-476b-8024-8c1afeef15c2","Type":"ContainerDied","Data":"8823aaf8e2b881683c3565dbacf4e77b021b7daed39cb2ebda0ec4c401aec9c9"} Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.766670 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8823aaf8e2b881683c3565dbacf4e77b021b7daed39cb2ebda0ec4c401aec9c9" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.766733 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-h92sf" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.894168 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm"] Dec 10 01:13:22 crc kubenswrapper[4884]: E1210 01:13:22.895008 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e271cf19-0730-4ad2-9593-9b0eee408f11" containerName="registry-server" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.895110 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e271cf19-0730-4ad2-9593-9b0eee408f11" containerName="registry-server" Dec 10 01:13:22 crc kubenswrapper[4884]: E1210 01:13:22.895224 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14b622f8-c484-476b-8024-8c1afeef15c2" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.895300 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="14b622f8-c484-476b-8024-8c1afeef15c2" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Dec 10 01:13:22 crc kubenswrapper[4884]: E1210 01:13:22.895385 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e271cf19-0730-4ad2-9593-9b0eee408f11" containerName="extract-utilities" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.895481 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e271cf19-0730-4ad2-9593-9b0eee408f11" containerName="extract-utilities" Dec 10 01:13:22 crc kubenswrapper[4884]: E1210 01:13:22.896364 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e271cf19-0730-4ad2-9593-9b0eee408f11" containerName="extract-content" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.896461 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e271cf19-0730-4ad2-9593-9b0eee408f11" containerName="extract-content" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.896805 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="14b622f8-c484-476b-8024-8c1afeef15c2" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.896923 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e271cf19-0730-4ad2-9593-9b0eee408f11" containerName="registry-server" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.897916 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.904601 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.905768 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.905938 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.906313 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.905766 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:13:22 crc kubenswrapper[4884]: I1210 01:13:22.939669 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm"] Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.004746 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srn6h\" (UniqueName: \"kubernetes.io/projected/93f31477-32e4-4873-95f9-43327b02f0c8-kube-api-access-srn6h\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.004817 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.004895 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.005151 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.005354 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.108549 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.108823 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.110082 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srn6h\" (UniqueName: \"kubernetes.io/projected/93f31477-32e4-4873-95f9-43327b02f0c8-kube-api-access-srn6h\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.110227 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.110346 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.116677 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.116706 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.116810 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.122674 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-libvirt-secret-0\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.133667 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srn6h\" (UniqueName: \"kubernetes.io/projected/93f31477-32e4-4873-95f9-43327b02f0c8-kube-api-access-srn6h\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.223866 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:13:23 crc kubenswrapper[4884]: I1210 01:13:23.894952 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm"] Dec 10 01:13:24 crc kubenswrapper[4884]: I1210 01:13:24.795545 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" event={"ID":"93f31477-32e4-4873-95f9-43327b02f0c8","Type":"ContainerStarted","Data":"82e64fe413897bef427e533db3afa5750afcf6315ec1fc4bc5709ef8adbe1cba"} Dec 10 01:13:24 crc kubenswrapper[4884]: I1210 01:13:24.795986 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" event={"ID":"93f31477-32e4-4873-95f9-43327b02f0c8","Type":"ContainerStarted","Data":"728ceb4cf3b9477b9cd1bf5bd5194f7bd954f54d73d339c56f1bc14e3f2a14fe"} Dec 10 01:13:24 crc kubenswrapper[4884]: I1210 01:13:24.817822 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" podStartSLOduration=2.253433222 podStartE2EDuration="2.817796778s" podCreationTimestamp="2025-12-10 01:13:22 +0000 UTC" firstStartedPulling="2025-12-10 01:13:23.903165811 +0000 UTC m=+2576.981122938" lastFinishedPulling="2025-12-10 01:13:24.467529357 +0000 UTC m=+2577.545486494" observedRunningTime="2025-12-10 01:13:24.81604274 +0000 UTC m=+2577.893999917" watchObservedRunningTime="2025-12-10 01:13:24.817796778 +0000 UTC m=+2577.895753935" Dec 10 01:13:29 crc kubenswrapper[4884]: E1210 01:13:29.291658 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:13:32 crc kubenswrapper[4884]: E1210 01:13:32.289511 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:13:41 crc kubenswrapper[4884]: E1210 01:13:41.291013 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:13:47 crc 
Dec 10 01:13:52 crc kubenswrapper[4884]: E1210 01:13:52.291580 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:14:01 crc kubenswrapper[4884]: E1210 01:14:01.289724 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:14:06 crc kubenswrapper[4884]: E1210 01:14:06.289792 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:14:14 crc kubenswrapper[4884]: E1210 01:14:14.290666 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:14:17 crc kubenswrapper[4884]: E1210 01:14:17.311029 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:14:29 crc kubenswrapper[4884]: E1210 01:14:29.290056 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:14:32 crc kubenswrapper[4884]: E1210 01:14:32.294539 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:14:42 crc kubenswrapper[4884]: E1210 01:14:42.290224 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:14:46 crc kubenswrapper[4884]: E1210 01:14:46.290851 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:14:54 crc kubenswrapper[4884]: E1210 01:14:54.290338 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:14:59 crc kubenswrapper[4884]: E1210 01:14:59.291815 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.147973 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk"] Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.149827 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.152563 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.153805 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.167398 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk"] Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.316604 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6vn5\" (UniqueName: \"kubernetes.io/projected/21296a82-a64c-4080-8048-c3e408ac6ffd-kube-api-access-r6vn5\") pod \"collect-profiles-29422155-pt2vk\" (UID: \"21296a82-a64c-4080-8048-c3e408ac6ffd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.316889 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/21296a82-a64c-4080-8048-c3e408ac6ffd-config-volume\") pod \"collect-profiles-29422155-pt2vk\" (UID: \"21296a82-a64c-4080-8048-c3e408ac6ffd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.316931 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/21296a82-a64c-4080-8048-c3e408ac6ffd-secret-volume\") pod \"collect-profiles-29422155-pt2vk\" (UID: \"21296a82-a64c-4080-8048-c3e408ac6ffd\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.419223 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6vn5\" (UniqueName: \"kubernetes.io/projected/21296a82-a64c-4080-8048-c3e408ac6ffd-kube-api-access-r6vn5\") pod \"collect-profiles-29422155-pt2vk\" (UID: \"21296a82-a64c-4080-8048-c3e408ac6ffd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.421202 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/21296a82-a64c-4080-8048-c3e408ac6ffd-config-volume\") pod \"collect-profiles-29422155-pt2vk\" (UID: \"21296a82-a64c-4080-8048-c3e408ac6ffd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.421361 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/21296a82-a64c-4080-8048-c3e408ac6ffd-secret-volume\") pod \"collect-profiles-29422155-pt2vk\" (UID: \"21296a82-a64c-4080-8048-c3e408ac6ffd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.422080 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/21296a82-a64c-4080-8048-c3e408ac6ffd-config-volume\") pod \"collect-profiles-29422155-pt2vk\" (UID: \"21296a82-a64c-4080-8048-c3e408ac6ffd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.429714 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/21296a82-a64c-4080-8048-c3e408ac6ffd-secret-volume\") pod \"collect-profiles-29422155-pt2vk\" (UID: \"21296a82-a64c-4080-8048-c3e408ac6ffd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.435239 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6vn5\" (UniqueName: \"kubernetes.io/projected/21296a82-a64c-4080-8048-c3e408ac6ffd-kube-api-access-r6vn5\") pod \"collect-profiles-29422155-pt2vk\" (UID: \"21296a82-a64c-4080-8048-c3e408ac6ffd\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.480838 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:00 crc kubenswrapper[4884]: I1210 01:15:00.989268 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk"] Dec 10 01:15:01 crc kubenswrapper[4884]: I1210 01:15:01.089867 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" event={"ID":"21296a82-a64c-4080-8048-c3e408ac6ffd","Type":"ContainerStarted","Data":"44bc8486f08e60e27dd8b70c73926c0e7e0bd1ad34328bbaf38965752a80d531"} Dec 10 01:15:02 crc kubenswrapper[4884]: I1210 01:15:02.105185 4884 generic.go:334] "Generic (PLEG): container finished" podID="21296a82-a64c-4080-8048-c3e408ac6ffd" containerID="4cfc306f0ae70b8d884eee9ac8d50ecc2e0bdb72a46a3369a3231aa2662dd063" exitCode=0 Dec 10 01:15:02 crc kubenswrapper[4884]: I1210 01:15:02.105258 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" event={"ID":"21296a82-a64c-4080-8048-c3e408ac6ffd","Type":"ContainerDied","Data":"4cfc306f0ae70b8d884eee9ac8d50ecc2e0bdb72a46a3369a3231aa2662dd063"} Dec 10 01:15:03 crc kubenswrapper[4884]: I1210 01:15:03.486824 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:03 crc kubenswrapper[4884]: I1210 01:15:03.591058 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/21296a82-a64c-4080-8048-c3e408ac6ffd-secret-volume\") pod \"21296a82-a64c-4080-8048-c3e408ac6ffd\" (UID: \"21296a82-a64c-4080-8048-c3e408ac6ffd\") " Dec 10 01:15:03 crc kubenswrapper[4884]: I1210 01:15:03.591242 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r6vn5\" (UniqueName: \"kubernetes.io/projected/21296a82-a64c-4080-8048-c3e408ac6ffd-kube-api-access-r6vn5\") pod \"21296a82-a64c-4080-8048-c3e408ac6ffd\" (UID: \"21296a82-a64c-4080-8048-c3e408ac6ffd\") " Dec 10 01:15:03 crc kubenswrapper[4884]: I1210 01:15:03.591321 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/21296a82-a64c-4080-8048-c3e408ac6ffd-config-volume\") pod \"21296a82-a64c-4080-8048-c3e408ac6ffd\" (UID: \"21296a82-a64c-4080-8048-c3e408ac6ffd\") " Dec 10 01:15:03 crc kubenswrapper[4884]: I1210 01:15:03.592043 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21296a82-a64c-4080-8048-c3e408ac6ffd-config-volume" (OuterVolumeSpecName: "config-volume") pod "21296a82-a64c-4080-8048-c3e408ac6ffd" (UID: "21296a82-a64c-4080-8048-c3e408ac6ffd"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 01:15:03 crc kubenswrapper[4884]: I1210 01:15:03.597527 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21296a82-a64c-4080-8048-c3e408ac6ffd-kube-api-access-r6vn5" (OuterVolumeSpecName: "kube-api-access-r6vn5") pod "21296a82-a64c-4080-8048-c3e408ac6ffd" (UID: "21296a82-a64c-4080-8048-c3e408ac6ffd"). InnerVolumeSpecName "kube-api-access-r6vn5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:15:03 crc kubenswrapper[4884]: I1210 01:15:03.598116 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21296a82-a64c-4080-8048-c3e408ac6ffd-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "21296a82-a64c-4080-8048-c3e408ac6ffd" (UID: "21296a82-a64c-4080-8048-c3e408ac6ffd"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:15:03 crc kubenswrapper[4884]: I1210 01:15:03.693472 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r6vn5\" (UniqueName: \"kubernetes.io/projected/21296a82-a64c-4080-8048-c3e408ac6ffd-kube-api-access-r6vn5\") on node \"crc\" DevicePath \"\"" Dec 10 01:15:03 crc kubenswrapper[4884]: I1210 01:15:03.693498 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/21296a82-a64c-4080-8048-c3e408ac6ffd-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 01:15:03 crc kubenswrapper[4884]: I1210 01:15:03.693509 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/21296a82-a64c-4080-8048-c3e408ac6ffd-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 01:15:04 crc kubenswrapper[4884]: I1210 01:15:04.129211 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" event={"ID":"21296a82-a64c-4080-8048-c3e408ac6ffd","Type":"ContainerDied","Data":"44bc8486f08e60e27dd8b70c73926c0e7e0bd1ad34328bbaf38965752a80d531"} Dec 10 01:15:04 crc kubenswrapper[4884]: I1210 01:15:04.129266 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="44bc8486f08e60e27dd8b70c73926c0e7e0bd1ad34328bbaf38965752a80d531" Dec 10 01:15:04 crc kubenswrapper[4884]: I1210 01:15:04.129272 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk" Dec 10 01:15:04 crc kubenswrapper[4884]: I1210 01:15:04.587486 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26"] Dec 10 01:15:04 crc kubenswrapper[4884]: I1210 01:15:04.598835 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422110-4mw26"] Dec 10 01:15:05 crc kubenswrapper[4884]: I1210 01:15:05.305200 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84" path="/var/lib/kubelet/pods/5ddbb27b-ef7f-4940-b8f6-3788ffc1ee84/volumes" Dec 10 01:15:09 crc kubenswrapper[4884]: I1210 01:15:09.291363 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 01:15:09 crc kubenswrapper[4884]: E1210 01:15:09.412680 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:15:09 crc kubenswrapper[4884]: E1210 01:15:09.412738 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:15:09 crc kubenswrapper[4884]: E1210 01:15:09.412859 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 01:15:09 crc kubenswrapper[4884]: E1210 01:15:09.414235 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:15:14 crc kubenswrapper[4884]: E1210 01:15:14.414967 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:15:14 crc kubenswrapper[4884]: E1210 01:15:14.416966 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:15:14 crc kubenswrapper[4884]: E1210 01:15:14.417305 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
Dec 10 01:15:14 crc kubenswrapper[4884]: E1210 01:15:14.418759 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:15:18 crc kubenswrapper[4884]: I1210 01:15:18.097635 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 10 01:15:18 crc kubenswrapper[4884]: I1210 01:15:18.097963 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 10 01:15:24 crc kubenswrapper[4884]: E1210 01:15:24.288976 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:15:25 crc kubenswrapper[4884]: E1210 01:15:25.288944 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:15:37 crc kubenswrapper[4884]: E1210 01:15:37.302871 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:15:39 crc kubenswrapper[4884]: E1210 01:15:39.290279 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:15:41 crc kubenswrapper[4884]: I1210 01:15:41.884371 4884 scope.go:117] "RemoveContainer" containerID="c1db5762ab4498c062049a31b5b657eb72dbeddf096d15940f898e86c445b03d" Dec 10 01:15:48 crc kubenswrapper[4884]: I1210 01:15:48.097850 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:15:48 crc kubenswrapper[4884]: I1210 01:15:48.098363 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:15:48 crc kubenswrapper[4884]: E1210 01:15:48.291491 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:15:51 crc kubenswrapper[4884]: E1210 01:15:51.290360 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:16:01 crc kubenswrapper[4884]: E1210 01:16:01.291035 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:16:06 crc kubenswrapper[4884]: E1210 01:16:06.290774 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:16:13 crc kubenswrapper[4884]: E1210 01:16:13.289313 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:16:17 crc kubenswrapper[4884]: E1210 01:16:17.305317 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:16:18 crc kubenswrapper[4884]: I1210 01:16:18.097694 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:16:18 crc kubenswrapper[4884]: I1210 01:16:18.097744 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:16:18 crc kubenswrapper[4884]: I1210 01:16:18.097788 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 01:16:18 crc kubenswrapper[4884]: I1210 01:16:18.098566 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"63276cf5fac278ac47ca3f840ee652628693680ece5a047e41350771c1712289"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 01:16:18 crc kubenswrapper[4884]: I1210 01:16:18.098619 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://63276cf5fac278ac47ca3f840ee652628693680ece5a047e41350771c1712289" gracePeriod=600 Dec 10 01:16:19 crc kubenswrapper[4884]: I1210 01:16:19.082391 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="63276cf5fac278ac47ca3f840ee652628693680ece5a047e41350771c1712289" exitCode=0 Dec 10 01:16:19 crc kubenswrapper[4884]: I1210 01:16:19.082507 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"63276cf5fac278ac47ca3f840ee652628693680ece5a047e41350771c1712289"} Dec 10 01:16:19 crc kubenswrapper[4884]: I1210 01:16:19.082979 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"} Dec 10 01:16:19 crc kubenswrapper[4884]: I1210 01:16:19.083010 4884 scope.go:117] "RemoveContainer" containerID="95cc8353969ae7f422ab2b71a45af277f7329fbf8ac2e3af73444aaf831049cc" Dec 10 01:16:24 crc kubenswrapper[4884]: E1210 01:16:24.291366 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off 
Dec 10 01:16:29 crc kubenswrapper[4884]: E1210 01:16:29.301826 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:16:39 crc kubenswrapper[4884]: E1210 01:16:39.292968 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:16:41 crc kubenswrapper[4884]: E1210 01:16:41.290930 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:16:51 crc kubenswrapper[4884]: E1210 01:16:51.291158 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:16:53 crc kubenswrapper[4884]: E1210 01:16:53.289787 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:17:04 crc kubenswrapper[4884]: E1210 01:17:04.289993 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:17:06 crc kubenswrapper[4884]: E1210 01:17:06.291256 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:17:17 crc kubenswrapper[4884]: E1210 01:17:17.300788 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:17:18 crc kubenswrapper[4884]: E1210 01:17:18.289663 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:17:30 crc kubenswrapper[4884]: E1210 01:17:30.291130 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:17:30 crc kubenswrapper[4884]: E1210 01:17:30.291172 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:17:43 crc kubenswrapper[4884]: E1210 01:17:43.290313 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:17:43 crc kubenswrapper[4884]: E1210 01:17:43.290369 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:17:55 crc kubenswrapper[4884]: E1210 01:17:55.289887 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:17:57 crc kubenswrapper[4884]: E1210 01:17:57.289801 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:18:02 crc kubenswrapper[4884]: I1210 01:18:02.319175 4884 generic.go:334] "Generic (PLEG): container finished" podID="93f31477-32e4-4873-95f9-43327b02f0c8" containerID="82e64fe413897bef427e533db3afa5750afcf6315ec1fc4bc5709ef8adbe1cba" exitCode=0 Dec 10 01:18:02 crc kubenswrapper[4884]: I1210 01:18:02.319331 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" event={"ID":"93f31477-32e4-4873-95f9-43327b02f0c8","Type":"ContainerDied","Data":"82e64fe413897bef427e533db3afa5750afcf6315ec1fc4bc5709ef8adbe1cba"} Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.818202 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.847885 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srn6h\" (UniqueName: \"kubernetes.io/projected/93f31477-32e4-4873-95f9-43327b02f0c8-kube-api-access-srn6h\") pod \"93f31477-32e4-4873-95f9-43327b02f0c8\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.848036 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-libvirt-secret-0\") pod \"93f31477-32e4-4873-95f9-43327b02f0c8\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.848142 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-inventory\") pod \"93f31477-32e4-4873-95f9-43327b02f0c8\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.848165 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-ssh-key\") pod \"93f31477-32e4-4873-95f9-43327b02f0c8\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.848241 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-libvirt-combined-ca-bundle\") pod \"93f31477-32e4-4873-95f9-43327b02f0c8\" (UID: \"93f31477-32e4-4873-95f9-43327b02f0c8\") " Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.856031 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93f31477-32e4-4873-95f9-43327b02f0c8-kube-api-access-srn6h" (OuterVolumeSpecName: "kube-api-access-srn6h") pod "93f31477-32e4-4873-95f9-43327b02f0c8" (UID: "93f31477-32e4-4873-95f9-43327b02f0c8"). InnerVolumeSpecName "kube-api-access-srn6h". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.865730 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "93f31477-32e4-4873-95f9-43327b02f0c8" (UID: "93f31477-32e4-4873-95f9-43327b02f0c8"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.885514 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "93f31477-32e4-4873-95f9-43327b02f0c8" (UID: "93f31477-32e4-4873-95f9-43327b02f0c8"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.890079 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-inventory" (OuterVolumeSpecName: "inventory") pod "93f31477-32e4-4873-95f9-43327b02f0c8" (UID: "93f31477-32e4-4873-95f9-43327b02f0c8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.907048 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "93f31477-32e4-4873-95f9-43327b02f0c8" (UID: "93f31477-32e4-4873-95f9-43327b02f0c8"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.951925 4884 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.951967 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srn6h\" (UniqueName: \"kubernetes.io/projected/93f31477-32e4-4873-95f9-43327b02f0c8-kube-api-access-srn6h\") on node \"crc\" DevicePath \"\"" Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.951981 4884 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.951995 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:18:03 crc kubenswrapper[4884]: I1210 01:18:03.952006 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/93f31477-32e4-4873-95f9-43327b02f0c8-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.339730 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" event={"ID":"93f31477-32e4-4873-95f9-43327b02f0c8","Type":"ContainerDied","Data":"728ceb4cf3b9477b9cd1bf5bd5194f7bd954f54d73d339c56f1bc14e3f2a14fe"} Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.339768 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="728ceb4cf3b9477b9cd1bf5bd5194f7bd954f54d73d339c56f1bc14e3f2a14fe" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.339795 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.480366 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9"] Dec 10 01:18:04 crc kubenswrapper[4884]: E1210 01:18:04.481009 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93f31477-32e4-4873-95f9-43327b02f0c8" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.481033 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93f31477-32e4-4873-95f9-43327b02f0c8" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Dec 10 01:18:04 crc kubenswrapper[4884]: E1210 01:18:04.481043 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21296a82-a64c-4080-8048-c3e408ac6ffd" containerName="collect-profiles" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.481054 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="21296a82-a64c-4080-8048-c3e408ac6ffd" containerName="collect-profiles" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.481334 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="21296a82-a64c-4080-8048-c3e408ac6ffd" containerName="collect-profiles" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.481374 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="93f31477-32e4-4873-95f9-43327b02f0c8" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.482277 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.485898 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.486130 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.486194 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.486197 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.486132 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.490769 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9"] Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.564967 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.565031 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: 
\"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.565106 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.565165 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.565192 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxhnr\" (UniqueName: \"kubernetes.io/projected/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-kube-api-access-gxhnr\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.565255 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.565418 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.667330 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.667404 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.667480 4884 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.667535 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.667581 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.667606 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxhnr\" (UniqueName: \"kubernetes.io/projected/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-kube-api-access-gxhnr\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.667657 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.673166 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.674115 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.674213 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc 
kubenswrapper[4884]: I1210 01:18:04.675036 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.677845 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.686038 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.689475 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxhnr\" (UniqueName: \"kubernetes.io/projected/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-kube-api-access-gxhnr\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-582t9\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:04 crc kubenswrapper[4884]: I1210 01:18:04.804853 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:18:05 crc kubenswrapper[4884]: I1210 01:18:05.404423 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9"] Dec 10 01:18:06 crc kubenswrapper[4884]: I1210 01:18:06.366902 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" event={"ID":"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b","Type":"ContainerStarted","Data":"e54e84525bda9ca3ce2121a139f1d11624340a6c75e4d529a0f8922a049393bf"} Dec 10 01:18:06 crc kubenswrapper[4884]: I1210 01:18:06.367227 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" event={"ID":"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b","Type":"ContainerStarted","Data":"98418b8d04ba45db00fef8f923fdd4fd8b6e9d1614b2116b4fc359a895b7d5e7"} Dec 10 01:18:06 crc kubenswrapper[4884]: I1210 01:18:06.394587 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" podStartSLOduration=1.764096501 podStartE2EDuration="2.394564247s" podCreationTimestamp="2025-12-10 01:18:04 +0000 UTC" firstStartedPulling="2025-12-10 01:18:05.404144717 +0000 UTC m=+2858.482101844" lastFinishedPulling="2025-12-10 01:18:06.034612473 +0000 UTC m=+2859.112569590" observedRunningTime="2025-12-10 01:18:06.38470783 +0000 UTC m=+2859.462664957" watchObservedRunningTime="2025-12-10 01:18:06.394564247 +0000 UTC m=+2859.472521374" Dec 10 01:18:08 crc kubenswrapper[4884]: E1210 01:18:08.288864 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:18:10 crc kubenswrapper[4884]: E1210 01:18:10.289095 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:18:18 crc kubenswrapper[4884]: I1210 01:18:18.098262 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:18:18 crc kubenswrapper[4884]: I1210 01:18:18.098919 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:18:21 crc kubenswrapper[4884]: E1210 01:18:21.297980 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" 
Dec 10 01:18:24 crc kubenswrapper[4884]: E1210 01:18:24.289519 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:18:36 crc kubenswrapper[4884]: E1210 01:18:36.290214 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:18:39 crc kubenswrapper[4884]: E1210 01:18:39.290879 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:18:47 crc kubenswrapper[4884]: E1210 01:18:47.306714 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:18:48 crc kubenswrapper[4884]: I1210 01:18:48.098972 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:18:48 crc kubenswrapper[4884]: I1210 01:18:48.099053 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:18:50 crc kubenswrapper[4884]: E1210 01:18:50.290423 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:18:59 crc kubenswrapper[4884]: E1210 01:18:59.290843 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:19:01 crc kubenswrapper[4884]: E1210 01:19:01.291859 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:19:05 crc kubenswrapper[4884]: I1210 01:19:05.664866 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t4wql"] Dec 10 01:19:05 crc kubenswrapper[4884]: I1210 01:19:05.671460 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:05 crc kubenswrapper[4884]: I1210 01:19:05.715406 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t4wql"] Dec 10 01:19:05 crc kubenswrapper[4884]: I1210 01:19:05.820613 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89f6fe88-40b5-4671-b5f5-eba1061d3467-utilities\") pod \"community-operators-t4wql\" (UID: \"89f6fe88-40b5-4671-b5f5-eba1061d3467\") " pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:05 crc kubenswrapper[4884]: I1210 01:19:05.820945 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmh4r\" (UniqueName: \"kubernetes.io/projected/89f6fe88-40b5-4671-b5f5-eba1061d3467-kube-api-access-jmh4r\") pod \"community-operators-t4wql\" (UID: \"89f6fe88-40b5-4671-b5f5-eba1061d3467\") " pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:05 crc kubenswrapper[4884]: I1210 01:19:05.821016 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89f6fe88-40b5-4671-b5f5-eba1061d3467-catalog-content\") pod \"community-operators-t4wql\" (UID: \"89f6fe88-40b5-4671-b5f5-eba1061d3467\") " pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:05 crc kubenswrapper[4884]: I1210 01:19:05.922715 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89f6fe88-40b5-4671-b5f5-eba1061d3467-utilities\") pod \"community-operators-t4wql\" (UID: \"89f6fe88-40b5-4671-b5f5-eba1061d3467\") " pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:05 crc kubenswrapper[4884]: I1210 01:19:05.922774 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmh4r\" (UniqueName: \"kubernetes.io/projected/89f6fe88-40b5-4671-b5f5-eba1061d3467-kube-api-access-jmh4r\") pod \"community-operators-t4wql\" (UID: \"89f6fe88-40b5-4671-b5f5-eba1061d3467\") " pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:05 crc kubenswrapper[4884]: I1210 01:19:05.922829 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89f6fe88-40b5-4671-b5f5-eba1061d3467-catalog-content\") pod \"community-operators-t4wql\" (UID: \"89f6fe88-40b5-4671-b5f5-eba1061d3467\") " pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:05 crc kubenswrapper[4884]: I1210 01:19:05.923257 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89f6fe88-40b5-4671-b5f5-eba1061d3467-utilities\") pod \"community-operators-t4wql\" (UID: \"89f6fe88-40b5-4671-b5f5-eba1061d3467\") " pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:05 crc kubenswrapper[4884]: I1210 01:19:05.923307 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/89f6fe88-40b5-4671-b5f5-eba1061d3467-catalog-content\") pod \"community-operators-t4wql\" (UID: \"89f6fe88-40b5-4671-b5f5-eba1061d3467\") " pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:05 crc kubenswrapper[4884]: I1210 01:19:05.941072 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmh4r\" (UniqueName: \"kubernetes.io/projected/89f6fe88-40b5-4671-b5f5-eba1061d3467-kube-api-access-jmh4r\") pod \"community-operators-t4wql\" (UID: \"89f6fe88-40b5-4671-b5f5-eba1061d3467\") " pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:06 crc kubenswrapper[4884]: I1210 01:19:06.002228 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:06 crc kubenswrapper[4884]: I1210 01:19:06.574720 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t4wql"] Dec 10 01:19:07 crc kubenswrapper[4884]: I1210 01:19:07.103398 4884 generic.go:334] "Generic (PLEG): container finished" podID="89f6fe88-40b5-4671-b5f5-eba1061d3467" containerID="a2ef8fe2918c00d9a5b88cdde541c1a65aa204f86b93f97c24e24322a782dcf0" exitCode=0 Dec 10 01:19:07 crc kubenswrapper[4884]: I1210 01:19:07.103591 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t4wql" event={"ID":"89f6fe88-40b5-4671-b5f5-eba1061d3467","Type":"ContainerDied","Data":"a2ef8fe2918c00d9a5b88cdde541c1a65aa204f86b93f97c24e24322a782dcf0"} Dec 10 01:19:07 crc kubenswrapper[4884]: I1210 01:19:07.103784 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t4wql" event={"ID":"89f6fe88-40b5-4671-b5f5-eba1061d3467","Type":"ContainerStarted","Data":"69dbef323a0cfacd7773bf0547679607e76aae3caf19be8eb91a815fe6d577d9"} Dec 10 01:19:08 crc kubenswrapper[4884]: I1210 01:19:08.115055 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t4wql" event={"ID":"89f6fe88-40b5-4671-b5f5-eba1061d3467","Type":"ContainerStarted","Data":"e05f72638b11089d54e1dca4cc6ecd9fb2d5cf80578bcce9105b344089f1960d"} Dec 10 01:19:09 crc kubenswrapper[4884]: I1210 01:19:09.134537 4884 generic.go:334] "Generic (PLEG): container finished" podID="89f6fe88-40b5-4671-b5f5-eba1061d3467" containerID="e05f72638b11089d54e1dca4cc6ecd9fb2d5cf80578bcce9105b344089f1960d" exitCode=0 Dec 10 01:19:09 crc kubenswrapper[4884]: I1210 01:19:09.134597 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t4wql" event={"ID":"89f6fe88-40b5-4671-b5f5-eba1061d3467","Type":"ContainerDied","Data":"e05f72638b11089d54e1dca4cc6ecd9fb2d5cf80578bcce9105b344089f1960d"} Dec 10 01:19:09 crc kubenswrapper[4884]: I1210 01:19:09.137324 4884 generic.go:334] "Generic (PLEG): container finished" podID="a14d2c3e-ab42-422b-9e3e-e716a86dfe8b" containerID="e54e84525bda9ca3ce2121a139f1d11624340a6c75e4d529a0f8922a049393bf" exitCode=2 Dec 10 01:19:09 crc kubenswrapper[4884]: I1210 01:19:09.137356 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" event={"ID":"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b","Type":"ContainerDied","Data":"e54e84525bda9ca3ce2121a139f1d11624340a6c75e4d529a0f8922a049393bf"} Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.153527 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-t4wql" event={"ID":"89f6fe88-40b5-4671-b5f5-eba1061d3467","Type":"ContainerStarted","Data":"f8cac88b76d5d7d380dd53e80b8d12b6580342e060def546bd4975340e086e8f"} Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.188426 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t4wql" podStartSLOduration=2.681769431 podStartE2EDuration="5.188403862s" podCreationTimestamp="2025-12-10 01:19:05 +0000 UTC" firstStartedPulling="2025-12-10 01:19:07.108487723 +0000 UTC m=+2920.186444870" lastFinishedPulling="2025-12-10 01:19:09.615122174 +0000 UTC m=+2922.693079301" observedRunningTime="2025-12-10 01:19:10.183708915 +0000 UTC m=+2923.261666112" watchObservedRunningTime="2025-12-10 01:19:10.188403862 +0000 UTC m=+2923.266360969" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.681259 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.746158 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ssh-key\") pod \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.746391 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-telemetry-combined-ca-bundle\") pod \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.746650 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-inventory\") pod \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.746717 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-2\") pod \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.746772 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-0\") pod \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.746805 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxhnr\" (UniqueName: \"kubernetes.io/projected/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-kube-api-access-gxhnr\") pod \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.746852 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-1\") pod 
\"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\" (UID: \"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b\") " Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.775250 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-kube-api-access-gxhnr" (OuterVolumeSpecName: "kube-api-access-gxhnr") pod "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b" (UID: "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b"). InnerVolumeSpecName "kube-api-access-gxhnr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.776711 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b" (UID: "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.781235 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b" (UID: "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.784740 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b" (UID: "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.796427 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b" (UID: "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.800134 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b" (UID: "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.808058 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-inventory" (OuterVolumeSpecName: "inventory") pod "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b" (UID: "a14d2c3e-ab42-422b-9e3e-e716a86dfe8b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.849220 4884 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.849253 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.849265 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.849278 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.849290 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxhnr\" (UniqueName: \"kubernetes.io/projected/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-kube-api-access-gxhnr\") on node \"crc\" DevicePath \"\"" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.849303 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Dec 10 01:19:10 crc kubenswrapper[4884]: I1210 01:19:10.849425 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a14d2c3e-ab42-422b-9e3e-e716a86dfe8b-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:19:11 crc kubenswrapper[4884]: I1210 01:19:11.169380 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" event={"ID":"a14d2c3e-ab42-422b-9e3e-e716a86dfe8b","Type":"ContainerDied","Data":"98418b8d04ba45db00fef8f923fdd4fd8b6e9d1614b2116b4fc359a895b7d5e7"} Dec 10 01:19:11 crc kubenswrapper[4884]: I1210 01:19:11.169424 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98418b8d04ba45db00fef8f923fdd4fd8b6e9d1614b2116b4fc359a895b7d5e7" Dec 10 01:19:11 crc kubenswrapper[4884]: I1210 01:19:11.170852 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-582t9" Dec 10 01:19:14 crc kubenswrapper[4884]: E1210 01:19:14.291198 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:19:15 crc kubenswrapper[4884]: E1210 01:19:15.291323 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:19:16 crc kubenswrapper[4884]: I1210 01:19:16.002978 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:16 crc kubenswrapper[4884]: I1210 01:19:16.003083 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:16 crc kubenswrapper[4884]: I1210 01:19:16.076144 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:16 crc kubenswrapper[4884]: I1210 01:19:16.319946 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:16 crc kubenswrapper[4884]: I1210 01:19:16.394106 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t4wql"] Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.096115 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk"] Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.097760 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.097835 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:19:18 crc kubenswrapper[4884]: E1210 01:19:18.097958 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a14d2c3e-ab42-422b-9e3e-e716a86dfe8b" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.097984 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a14d2c3e-ab42-422b-9e3e-e716a86dfe8b" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.099155 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a14d2c3e-ab42-422b-9e3e-e716a86dfe8b" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.100491 4884 kubelet.go:2542] "SyncLoop 
(probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.100608 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.101186 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.101247 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" gracePeriod=600 Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.102995 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.103266 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.103398 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.103518 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.115477 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.125972 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk"] Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.245923 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.246387 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.246462 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: 
\"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.246559 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vq6jf\" (UniqueName: \"kubernetes.io/projected/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-kube-api-access-vq6jf\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.246693 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.246760 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.246876 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.277632 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-t4wql" podUID="89f6fe88-40b5-4671-b5f5-eba1061d3467" containerName="registry-server" containerID="cri-o://f8cac88b76d5d7d380dd53e80b8d12b6580342e060def546bd4975340e086e8f" gracePeriod=2 Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.349364 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.349473 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.350292 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vq6jf\" (UniqueName: \"kubernetes.io/projected/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-kube-api-access-vq6jf\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.350416 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.350544 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.351872 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.352030 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.355396 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.356108 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.356709 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.357229 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: 
\"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.357343 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.362043 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.370931 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vq6jf\" (UniqueName: \"kubernetes.io/projected/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-kube-api-access-vq6jf\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: I1210 01:19:18.427925 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:19:18 crc kubenswrapper[4884]: E1210 01:19:18.734948 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.047487 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk"] Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.299804 4884 generic.go:334] "Generic (PLEG): container finished" podID="89f6fe88-40b5-4671-b5f5-eba1061d3467" containerID="f8cac88b76d5d7d380dd53e80b8d12b6580342e060def546bd4975340e086e8f" exitCode=0 Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.302572 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" exitCode=0 Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.306414 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t4wql" event={"ID":"89f6fe88-40b5-4671-b5f5-eba1061d3467","Type":"ContainerDied","Data":"f8cac88b76d5d7d380dd53e80b8d12b6580342e060def546bd4975340e086e8f"} Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.306492 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t4wql" 
event={"ID":"89f6fe88-40b5-4671-b5f5-eba1061d3467","Type":"ContainerDied","Data":"69dbef323a0cfacd7773bf0547679607e76aae3caf19be8eb91a815fe6d577d9"} Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.306509 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69dbef323a0cfacd7773bf0547679607e76aae3caf19be8eb91a815fe6d577d9" Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.306550 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"} Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.306576 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" event={"ID":"2149c412-a8d9-4bfe-b1b7-e901dcc5d132","Type":"ContainerStarted","Data":"fba440c0914ae8f7065cf521865303e205b4889268540f9ebd182554f9e598ae"} Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.306622 4884 scope.go:117] "RemoveContainer" containerID="63276cf5fac278ac47ca3f840ee652628693680ece5a047e41350771c1712289" Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.307458 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" Dec 10 01:19:19 crc kubenswrapper[4884]: E1210 01:19:19.307924 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.336932 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.476911 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmh4r\" (UniqueName: \"kubernetes.io/projected/89f6fe88-40b5-4671-b5f5-eba1061d3467-kube-api-access-jmh4r\") pod \"89f6fe88-40b5-4671-b5f5-eba1061d3467\" (UID: \"89f6fe88-40b5-4671-b5f5-eba1061d3467\") " Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.476970 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89f6fe88-40b5-4671-b5f5-eba1061d3467-utilities\") pod \"89f6fe88-40b5-4671-b5f5-eba1061d3467\" (UID: \"89f6fe88-40b5-4671-b5f5-eba1061d3467\") " Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.477194 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89f6fe88-40b5-4671-b5f5-eba1061d3467-catalog-content\") pod \"89f6fe88-40b5-4671-b5f5-eba1061d3467\" (UID: \"89f6fe88-40b5-4671-b5f5-eba1061d3467\") " Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.478748 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89f6fe88-40b5-4671-b5f5-eba1061d3467-utilities" (OuterVolumeSpecName: "utilities") pod "89f6fe88-40b5-4671-b5f5-eba1061d3467" (UID: "89f6fe88-40b5-4671-b5f5-eba1061d3467"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.482920 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89f6fe88-40b5-4671-b5f5-eba1061d3467-kube-api-access-jmh4r" (OuterVolumeSpecName: "kube-api-access-jmh4r") pod "89f6fe88-40b5-4671-b5f5-eba1061d3467" (UID: "89f6fe88-40b5-4671-b5f5-eba1061d3467"). InnerVolumeSpecName "kube-api-access-jmh4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.535639 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89f6fe88-40b5-4671-b5f5-eba1061d3467-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "89f6fe88-40b5-4671-b5f5-eba1061d3467" (UID: "89f6fe88-40b5-4671-b5f5-eba1061d3467"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.580175 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmh4r\" (UniqueName: \"kubernetes.io/projected/89f6fe88-40b5-4671-b5f5-eba1061d3467-kube-api-access-jmh4r\") on node \"crc\" DevicePath \"\"" Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.580213 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89f6fe88-40b5-4671-b5f5-eba1061d3467-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:19:19 crc kubenswrapper[4884]: I1210 01:19:19.580223 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89f6fe88-40b5-4671-b5f5-eba1061d3467-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:19:20 crc kubenswrapper[4884]: I1210 01:19:20.321678 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t4wql" Dec 10 01:19:20 crc kubenswrapper[4884]: I1210 01:19:20.322568 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" event={"ID":"2149c412-a8d9-4bfe-b1b7-e901dcc5d132","Type":"ContainerStarted","Data":"a7ff7f7d1382390aa4ecef61f7141443e236c7c376e73bb6e3ffaa2af3120dbe"} Dec 10 01:19:20 crc kubenswrapper[4884]: I1210 01:19:20.362966 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" podStartSLOduration=1.8783579449999999 podStartE2EDuration="2.362942492s" podCreationTimestamp="2025-12-10 01:19:18 +0000 UTC" firstStartedPulling="2025-12-10 01:19:19.066845229 +0000 UTC m=+2932.144802346" lastFinishedPulling="2025-12-10 01:19:19.551429756 +0000 UTC m=+2932.629386893" observedRunningTime="2025-12-10 01:19:20.34513143 +0000 UTC m=+2933.423088587" watchObservedRunningTime="2025-12-10 01:19:20.362942492 +0000 UTC m=+2933.440899629" Dec 10 01:19:20 crc kubenswrapper[4884]: I1210 01:19:20.390112 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t4wql"] Dec 10 01:19:20 crc kubenswrapper[4884]: I1210 01:19:20.402019 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-t4wql"] Dec 10 01:19:21 crc kubenswrapper[4884]: I1210 01:19:21.303971 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89f6fe88-40b5-4671-b5f5-eba1061d3467" path="/var/lib/kubelet/pods/89f6fe88-40b5-4671-b5f5-eba1061d3467/volumes" Dec 10 01:19:28 crc kubenswrapper[4884]: E1210 01:19:28.291038 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:19:28 crc kubenswrapper[4884]: E1210 01:19:28.291086 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:19:31 crc kubenswrapper[4884]: I1210 01:19:31.288169 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" Dec 10 01:19:31 crc kubenswrapper[4884]: E1210 01:19:31.289272 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:19:42 crc kubenswrapper[4884]: E1210 01:19:42.290517 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 
Dec 10 01:19:46 crc kubenswrapper[4884]: I1210 01:19:46.287298 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:19:46 crc kubenswrapper[4884]: E1210 01:19:46.287960 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 01:19:55 crc kubenswrapper[4884]: E1210 01:19:55.290468 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:19:57 crc kubenswrapper[4884]: E1210 01:19:57.307201 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:20:01 crc kubenswrapper[4884]: I1210 01:20:01.287573 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:20:01 crc kubenswrapper[4884]: E1210 01:20:01.288700 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 01:20:06 crc kubenswrapper[4884]: E1210 01:20:06.289485 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:20:11 crc kubenswrapper[4884]: E1210 01:20:11.289404 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:20:16 crc kubenswrapper[4884]: I1210 01:20:16.287719 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:20:16 crc kubenswrapper[4884]: E1210 01:20:16.288610 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 01:20:19 crc kubenswrapper[4884]: I1210 01:20:19.131461 4884 generic.go:334] "Generic (PLEG): container finished" podID="2149c412-a8d9-4bfe-b1b7-e901dcc5d132" containerID="a7ff7f7d1382390aa4ecef61f7141443e236c7c376e73bb6e3ffaa2af3120dbe" exitCode=2
Dec 10 01:20:19 crc kubenswrapper[4884]: I1210 01:20:19.132859 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" event={"ID":"2149c412-a8d9-4bfe-b1b7-e901dcc5d132","Type":"ContainerDied","Data":"a7ff7f7d1382390aa4ecef61f7141443e236c7c376e73bb6e3ffaa2af3120dbe"}
Dec 10 01:20:20 crc kubenswrapper[4884]: I1210 01:20:20.288868 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 10 01:20:20 crc kubenswrapper[4884]: E1210 01:20:20.408279 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
Dec 10 01:20:20 crc kubenswrapper[4884]: E1210 01:20:20.408343 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested"
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:20:20 crc kubenswrapper[4884]: E1210 01:20:20.408471 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:20:20 crc kubenswrapper[4884]: E1210 01:20:20.409684 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.154812 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" event={"ID":"2149c412-a8d9-4bfe-b1b7-e901dcc5d132","Type":"ContainerDied","Data":"fba440c0914ae8f7065cf521865303e205b4889268540f9ebd182554f9e598ae"} Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.155035 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fba440c0914ae8f7065cf521865303e205b4889268540f9ebd182554f9e598ae" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.194424 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.303152 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ssh-key\") pod \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.303271 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-inventory\") pod \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.303329 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vq6jf\" (UniqueName: \"kubernetes.io/projected/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-kube-api-access-vq6jf\") pod \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.303369 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-2\") pod \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.303409 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-1\") pod \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.303452 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-0\") pod \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.303589 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-telemetry-combined-ca-bundle\") pod \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\" (UID: \"2149c412-a8d9-4bfe-b1b7-e901dcc5d132\") " Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.308543 4884 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-kube-api-access-vq6jf" (OuterVolumeSpecName: "kube-api-access-vq6jf") pod "2149c412-a8d9-4bfe-b1b7-e901dcc5d132" (UID: "2149c412-a8d9-4bfe-b1b7-e901dcc5d132"). InnerVolumeSpecName "kube-api-access-vq6jf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.327253 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "2149c412-a8d9-4bfe-b1b7-e901dcc5d132" (UID: "2149c412-a8d9-4bfe-b1b7-e901dcc5d132"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.333312 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2149c412-a8d9-4bfe-b1b7-e901dcc5d132" (UID: "2149c412-a8d9-4bfe-b1b7-e901dcc5d132"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.340815 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "2149c412-a8d9-4bfe-b1b7-e901dcc5d132" (UID: "2149c412-a8d9-4bfe-b1b7-e901dcc5d132"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.344608 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-inventory" (OuterVolumeSpecName: "inventory") pod "2149c412-a8d9-4bfe-b1b7-e901dcc5d132" (UID: "2149c412-a8d9-4bfe-b1b7-e901dcc5d132"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.347718 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "2149c412-a8d9-4bfe-b1b7-e901dcc5d132" (UID: "2149c412-a8d9-4bfe-b1b7-e901dcc5d132"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.374368 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "2149c412-a8d9-4bfe-b1b7-e901dcc5d132" (UID: "2149c412-a8d9-4bfe-b1b7-e901dcc5d132"). InnerVolumeSpecName "ceilometer-compute-config-data-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.416744 4884 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.416780 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.416792 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.416803 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vq6jf\" (UniqueName: \"kubernetes.io/projected/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-kube-api-access-vq6jf\") on node \"crc\" DevicePath \"\"" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.416812 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.416822 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Dec 10 01:20:21 crc kubenswrapper[4884]: I1210 01:20:21.416832 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/2149c412-a8d9-4bfe-b1b7-e901dcc5d132-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Dec 10 01:20:22 crc kubenswrapper[4884]: I1210 01:20:22.164889 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk" Dec 10 01:20:24 crc kubenswrapper[4884]: E1210 01:20:24.428138 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:20:24 crc kubenswrapper[4884]: E1210 01:20:24.428597 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:20:24 crc kubenswrapper[4884]: E1210 01:20:24.428833 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:20:24 crc kubenswrapper[4884]: E1210 01:20:24.430874 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:20:27 crc kubenswrapper[4884]: I1210 01:20:27.307812 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" Dec 10 01:20:27 crc kubenswrapper[4884]: E1210 01:20:27.308537 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:20:33 crc kubenswrapper[4884]: E1210 01:20:33.293371 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:20:36 crc kubenswrapper[4884]: E1210 01:20:36.289022 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:20:38 crc kubenswrapper[4884]: I1210 01:20:38.286830 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" Dec 10 01:20:38 crc kubenswrapper[4884]: E1210 01:20:38.287373 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.036163 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq"] Dec 10 01:20:39 crc kubenswrapper[4884]: E1210 01:20:39.036945 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2149c412-a8d9-4bfe-b1b7-e901dcc5d132" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.036971 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2149c412-a8d9-4bfe-b1b7-e901dcc5d132" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 10 01:20:39 crc kubenswrapper[4884]: E1210 01:20:39.037004 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89f6fe88-40b5-4671-b5f5-eba1061d3467" containerName="extract-utilities" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.037013 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="89f6fe88-40b5-4671-b5f5-eba1061d3467" containerName="extract-utilities" Dec 10 01:20:39 crc kubenswrapper[4884]: E1210 01:20:39.037041 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89f6fe88-40b5-4671-b5f5-eba1061d3467" containerName="registry-server" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 
01:20:39.037049 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="89f6fe88-40b5-4671-b5f5-eba1061d3467" containerName="registry-server" Dec 10 01:20:39 crc kubenswrapper[4884]: E1210 01:20:39.037066 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89f6fe88-40b5-4671-b5f5-eba1061d3467" containerName="extract-content" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.037073 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="89f6fe88-40b5-4671-b5f5-eba1061d3467" containerName="extract-content" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.037326 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="89f6fe88-40b5-4671-b5f5-eba1061d3467" containerName="registry-server" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.037360 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2149c412-a8d9-4bfe-b1b7-e901dcc5d132" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.039000 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.042300 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.043357 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.044312 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.045327 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.046853 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.066912 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq"] Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.133816 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.134331 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.134617 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-0\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.134658 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6dllz\" (UniqueName: \"kubernetes.io/projected/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-kube-api-access-6dllz\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.135458 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.135678 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.135721 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.237962 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.238635 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.238832 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.238978 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.239187 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.239230 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6dllz\" (UniqueName: \"kubernetes.io/projected/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-kube-api-access-6dllz\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.239261 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.244613 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.244729 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.244769 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.258536 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.260785 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.264263 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6dllz\" (UniqueName: \"kubernetes.io/projected/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-kube-api-access-6dllz\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.270457 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:39 crc kubenswrapper[4884]: I1210 01:20:39.415834 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:20:40 crc kubenswrapper[4884]: I1210 01:20:40.060986 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq"] Dec 10 01:20:40 crc kubenswrapper[4884]: I1210 01:20:40.384084 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" event={"ID":"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d","Type":"ContainerStarted","Data":"5d77193c373acea737ed9aeac827e0d93f6187145671e77a863ac64f4a75aac5"} Dec 10 01:20:41 crc kubenswrapper[4884]: I1210 01:20:41.397852 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" event={"ID":"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d","Type":"ContainerStarted","Data":"655c3a883ddedfef313aef89f149569083dc0a08c76feb6c0cc5c7fae0b2b58b"} Dec 10 01:20:41 crc kubenswrapper[4884]: I1210 01:20:41.434185 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" podStartSLOduration=1.9076313759999999 podStartE2EDuration="2.434157659s" podCreationTimestamp="2025-12-10 01:20:39 +0000 UTC" firstStartedPulling="2025-12-10 01:20:40.069544036 +0000 UTC m=+3013.147501153" lastFinishedPulling="2025-12-10 01:20:40.596070279 +0000 UTC m=+3013.674027436" observedRunningTime="2025-12-10 01:20:41.416896943 +0000 UTC m=+3014.494854100" watchObservedRunningTime="2025-12-10 01:20:41.434157659 +0000 UTC m=+3014.512114806" Dec 10 01:20:46 crc kubenswrapper[4884]: E1210 01:20:46.291022 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:20:51 crc kubenswrapper[4884]: I1210 01:20:51.288272 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" Dec 10 01:20:51 crc 
kubenswrapper[4884]: E1210 01:20:51.289740 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:20:51 crc kubenswrapper[4884]: E1210 01:20:51.292109 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:20:58 crc kubenswrapper[4884]: E1210 01:20:58.289977 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:21:06 crc kubenswrapper[4884]: I1210 01:21:06.288305 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" Dec 10 01:21:06 crc kubenswrapper[4884]: E1210 01:21:06.289475 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:21:06 crc kubenswrapper[4884]: E1210 01:21:06.289526 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:21:09 crc kubenswrapper[4884]: E1210 01:21:09.290875 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:21:17 crc kubenswrapper[4884]: I1210 01:21:17.296604 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" Dec 10 01:21:17 crc kubenswrapper[4884]: E1210 01:21:17.297550 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:21:18 crc kubenswrapper[4884]: E1210 01:21:18.290152 4884 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:21:20 crc kubenswrapper[4884]: E1210 01:21:20.289267 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:21:30 crc kubenswrapper[4884]: I1210 01:21:30.287959 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" Dec 10 01:21:30 crc kubenswrapper[4884]: E1210 01:21:30.288768 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:21:31 crc kubenswrapper[4884]: E1210 01:21:31.289560 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:21:32 crc kubenswrapper[4884]: E1210 01:21:32.290133 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:21:43 crc kubenswrapper[4884]: I1210 01:21:43.208377 4884 generic.go:334] "Generic (PLEG): container finished" podID="8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d" containerID="655c3a883ddedfef313aef89f149569083dc0a08c76feb6c0cc5c7fae0b2b58b" exitCode=2 Dec 10 01:21:43 crc kubenswrapper[4884]: I1210 01:21:43.208954 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" event={"ID":"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d","Type":"ContainerDied","Data":"655c3a883ddedfef313aef89f149569083dc0a08c76feb6c0cc5c7fae0b2b58b"} Dec 10 01:21:43 crc kubenswrapper[4884]: I1210 01:21:43.287987 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" Dec 10 01:21:43 crc kubenswrapper[4884]: E1210 01:21:43.288267 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:21:43 crc kubenswrapper[4884]: E1210 01:21:43.290318 4884 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:21:43 crc kubenswrapper[4884]: E1210 01:21:43.290404 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.710858 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.811652 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-0\") pod \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.811721 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-1\") pod \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.811778 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-2\") pod \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.811902 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-telemetry-combined-ca-bundle\") pod \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.812155 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-inventory\") pod \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.812177 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ssh-key\") pod \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.812250 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dllz\" (UniqueName: \"kubernetes.io/projected/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-kube-api-access-6dllz\") pod \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\" (UID: \"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d\") " Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.820759 4884 
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.821803 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-kube-api-access-6dllz" (OuterVolumeSpecName: "kube-api-access-6dllz") pod "8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d" (UID: "8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d"). InnerVolumeSpecName "kube-api-access-6dllz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.853795 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d" (UID: "8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.854157 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d" (UID: "8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.859666 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d" (UID: "8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.865746 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d" (UID: "8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.869710 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-inventory" (OuterVolumeSpecName: "inventory") pod "8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d" (UID: "8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.914304 4884 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.914740 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-inventory\") on node \"crc\" DevicePath \"\""
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.914750 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.914762 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6dllz\" (UniqueName: \"kubernetes.io/projected/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-kube-api-access-6dllz\") on node \"crc\" DevicePath \"\""
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.914770 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\""
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.914780 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\""
Dec 10 01:21:44 crc kubenswrapper[4884]: I1210 01:21:44.914789 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\""
Dec 10 01:21:45 crc kubenswrapper[4884]: I1210 01:21:45.237338 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq" event={"ID":"8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d","Type":"ContainerDied","Data":"5d77193c373acea737ed9aeac827e0d93f6187145671e77a863ac64f4a75aac5"}
Dec 10 01:21:45 crc kubenswrapper[4884]: I1210 01:21:45.237397 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d77193c373acea737ed9aeac827e0d93f6187145671e77a863ac64f4a75aac5"
Dec 10 01:21:45 crc kubenswrapper[4884]: I1210 01:21:45.237406 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq"
Dec 10 01:21:45 crc kubenswrapper[4884]: I1210 01:21:45.237406 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq"
Dec 10 01:21:54 crc kubenswrapper[4884]: E1210 01:21:54.290462 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:21:54 crc kubenswrapper[4884]: E1210 01:21:54.290591 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:21:57 crc kubenswrapper[4884]: I1210 01:21:57.307240 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:21:57 crc kubenswrapper[4884]: E1210 01:21:57.310670 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 01:22:08 crc kubenswrapper[4884]: E1210 01:22:08.289857 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:22:08 crc kubenswrapper[4884]: E1210 01:22:08.290611 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:22:12 crc kubenswrapper[4884]: I1210 01:22:12.288072 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:22:12 crc kubenswrapper[4884]: E1210 01:22:12.288831 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.061712 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"]
Dec 10 01:22:22 crc kubenswrapper[4884]: E1210 01:22:22.062801 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.062907 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.063224 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.064331 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.067474 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.067987 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.068387 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.068598 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.069217 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.074689 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"]
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.178826 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.179279 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.179409 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.179542 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.179712 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsh5r\" (UniqueName: \"kubernetes.io/projected/71585968-47e5-4291-8945-3af278090bd7-kube-api-access-zsh5r\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.179842 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.180021 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.282051 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.282278 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.282356 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.282412 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.282568 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsh5r\" (UniqueName: \"kubernetes.io/projected/71585968-47e5-4291-8945-3af278090bd7-kube-api-access-zsh5r\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.282665 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.282738 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.292723 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: E1210 01:22:22.294645 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.294910 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.295284 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.293705 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.298538 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.300059 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.302481 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsh5r\" (UniqueName: \"kubernetes.io/projected/71585968-47e5-4291-8945-3af278090bd7-kube-api-access-zsh5r\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:22 crc kubenswrapper[4884]: I1210 01:22:22.398961 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:22:23 crc kubenswrapper[4884]: I1210 01:22:23.079862 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"]
Dec 10 01:22:23 crc kubenswrapper[4884]: E1210 01:22:23.289836 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:22:23 crc kubenswrapper[4884]: I1210 01:22:23.767174 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn" event={"ID":"71585968-47e5-4291-8945-3af278090bd7","Type":"ContainerStarted","Data":"c5114fea81a1b0cf8e57c93b6323a5396a1f33ee9c53e8b139f4842b88cd3aeb"}
Dec 10 01:22:24 crc kubenswrapper[4884]: I1210 01:22:24.288277 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:22:24 crc kubenswrapper[4884]: E1210 01:22:24.289069 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 01:22:25 crc kubenswrapper[4884]: I1210 01:22:25.799788 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn" event={"ID":"71585968-47e5-4291-8945-3af278090bd7","Type":"ContainerStarted","Data":"b5cd34693712e35b2b73aebfd11adc4df3a617cee3edc989121fd3427a36f731"}
Dec 10 01:22:25 crc kubenswrapper[4884]: I1210 01:22:25.831909 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn" podStartSLOduration=2.173204912 podStartE2EDuration="3.831883029s" podCreationTimestamp="2025-12-10 01:22:22 +0000 UTC" firstStartedPulling="2025-12-10 01:22:23.079027213 +0000 UTC m=+3116.156984360" lastFinishedPulling="2025-12-10 01:22:24.73770532 +0000 UTC m=+3117.815662477" observedRunningTime="2025-12-10 01:22:25.817694276 +0000 UTC m=+3118.895651433" watchObservedRunningTime="2025-12-10 01:22:25.831883029 +0000 UTC m=+3118.909840166"
Dec 10 01:22:34 crc kubenswrapper[4884]: E1210 01:22:34.291020 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:22:34 crc kubenswrapper[4884]: E1210 01:22:34.291097 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:22:39 crc kubenswrapper[4884]: I1210 01:22:39.287718 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:22:39 crc kubenswrapper[4884]: E1210 01:22:39.288619 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 01:22:48 crc kubenswrapper[4884]: E1210 01:22:48.290393 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:22:49 crc kubenswrapper[4884]: E1210 01:22:49.294497 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:22:51 crc kubenswrapper[4884]: I1210 01:22:51.287499 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:22:51 crc kubenswrapper[4884]: E1210 01:22:51.288472 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 01:22:58 crc kubenswrapper[4884]: I1210 01:22:58.691850 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-szpcz"]
Dec 10 01:22:58 crc kubenswrapper[4884]: I1210 01:22:58.703154 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-szpcz"
Dec 10 01:22:58 crc kubenswrapper[4884]: I1210 01:22:58.706568 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-szpcz"]
Dec 10 01:22:58 crc kubenswrapper[4884]: I1210 01:22:58.832599 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dbc3721-42fe-4c6e-b92b-77d261226252-catalog-content\") pod \"redhat-marketplace-szpcz\" (UID: \"2dbc3721-42fe-4c6e-b92b-77d261226252\") " pod="openshift-marketplace/redhat-marketplace-szpcz"
Dec 10 01:22:58 crc kubenswrapper[4884]: I1210 01:22:58.832693 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dbc3721-42fe-4c6e-b92b-77d261226252-utilities\") pod \"redhat-marketplace-szpcz\" (UID: \"2dbc3721-42fe-4c6e-b92b-77d261226252\") " pod="openshift-marketplace/redhat-marketplace-szpcz"
Dec 10 01:22:58 crc kubenswrapper[4884]: I1210 01:22:58.832806 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rczm\" (UniqueName: \"kubernetes.io/projected/2dbc3721-42fe-4c6e-b92b-77d261226252-kube-api-access-6rczm\") pod \"redhat-marketplace-szpcz\" (UID: \"2dbc3721-42fe-4c6e-b92b-77d261226252\") " pod="openshift-marketplace/redhat-marketplace-szpcz"
Dec 10 01:22:58 crc kubenswrapper[4884]: I1210 01:22:58.934187 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rczm\" (UniqueName: \"kubernetes.io/projected/2dbc3721-42fe-4c6e-b92b-77d261226252-kube-api-access-6rczm\") pod \"redhat-marketplace-szpcz\" (UID: \"2dbc3721-42fe-4c6e-b92b-77d261226252\") " pod="openshift-marketplace/redhat-marketplace-szpcz"
Dec 10 01:22:58 crc kubenswrapper[4884]: I1210 01:22:58.934623 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dbc3721-42fe-4c6e-b92b-77d261226252-catalog-content\") pod \"redhat-marketplace-szpcz\" (UID: \"2dbc3721-42fe-4c6e-b92b-77d261226252\") " pod="openshift-marketplace/redhat-marketplace-szpcz"
Dec 10 01:22:58 crc kubenswrapper[4884]: I1210 01:22:58.934776 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dbc3721-42fe-4c6e-b92b-77d261226252-utilities\") pod \"redhat-marketplace-szpcz\" (UID: \"2dbc3721-42fe-4c6e-b92b-77d261226252\") " pod="openshift-marketplace/redhat-marketplace-szpcz"
Dec 10 01:22:58 crc kubenswrapper[4884]: I1210 01:22:58.935290 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dbc3721-42fe-4c6e-b92b-77d261226252-catalog-content\") pod \"redhat-marketplace-szpcz\" (UID: \"2dbc3721-42fe-4c6e-b92b-77d261226252\") " pod="openshift-marketplace/redhat-marketplace-szpcz"
Dec 10 01:22:58 crc kubenswrapper[4884]: I1210 01:22:58.935420 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dbc3721-42fe-4c6e-b92b-77d261226252-utilities\") pod \"redhat-marketplace-szpcz\" (UID: \"2dbc3721-42fe-4c6e-b92b-77d261226252\") " pod="openshift-marketplace/redhat-marketplace-szpcz"
Dec 10 01:22:58 crc kubenswrapper[4884]: I1210 01:22:58.964733 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rczm\" (UniqueName: \"kubernetes.io/projected/2dbc3721-42fe-4c6e-b92b-77d261226252-kube-api-access-6rczm\") pod \"redhat-marketplace-szpcz\" (UID: \"2dbc3721-42fe-4c6e-b92b-77d261226252\") " pod="openshift-marketplace/redhat-marketplace-szpcz"
Dec 10 01:22:59 crc kubenswrapper[4884]: I1210 01:22:59.024581 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-szpcz"
Dec 10 01:22:59 crc kubenswrapper[4884]: I1210 01:22:59.592838 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-szpcz"]
Dec 10 01:22:59 crc kubenswrapper[4884]: W1210 01:22:59.594494 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2dbc3721_42fe_4c6e_b92b_77d261226252.slice/crio-92d1691fb09f17edede3cdeba13f1e96e52a32bf326c8d046a0439a41413d593 WatchSource:0}: Error finding container 92d1691fb09f17edede3cdeba13f1e96e52a32bf326c8d046a0439a41413d593: Status 404 returned error can't find the container with id 92d1691fb09f17edede3cdeba13f1e96e52a32bf326c8d046a0439a41413d593
Dec 10 01:23:00 crc kubenswrapper[4884]: E1210 01:23:00.290143 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:23:00 crc kubenswrapper[4884]: I1210 01:23:00.367924 4884 generic.go:334] "Generic (PLEG): container finished" podID="2dbc3721-42fe-4c6e-b92b-77d261226252" containerID="c5b2438cb69a7e5c4b83d55af64b5dff5198cf19ccdb2b962c4c2ea0f4b6b6b3" exitCode=0
Dec 10 01:23:00 crc kubenswrapper[4884]: I1210 01:23:00.368031 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szpcz" event={"ID":"2dbc3721-42fe-4c6e-b92b-77d261226252","Type":"ContainerDied","Data":"c5b2438cb69a7e5c4b83d55af64b5dff5198cf19ccdb2b962c4c2ea0f4b6b6b3"}
Dec 10 01:23:00 crc kubenswrapper[4884]: I1210 01:23:00.368178 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szpcz" event={"ID":"2dbc3721-42fe-4c6e-b92b-77d261226252","Type":"ContainerStarted","Data":"92d1691fb09f17edede3cdeba13f1e96e52a32bf326c8d046a0439a41413d593"}
Dec 10 01:23:02 crc kubenswrapper[4884]: I1210 01:23:02.287112 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:23:02 crc kubenswrapper[4884]: E1210 01:23:02.287933 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 01:23:02 crc kubenswrapper[4884]: I1210 01:23:02.389247 4884 generic.go:334] "Generic (PLEG): container finished" podID="2dbc3721-42fe-4c6e-b92b-77d261226252" containerID="0e6d16ffa184b368d4d36f07a09818ecab73d35145b364513794c83d1aa2ffa9" exitCode=0
pod="openshift-marketplace/redhat-marketplace-szpcz" event={"ID":"2dbc3721-42fe-4c6e-b92b-77d261226252","Type":"ContainerDied","Data":"0e6d16ffa184b368d4d36f07a09818ecab73d35145b364513794c83d1aa2ffa9"} Dec 10 01:23:03 crc kubenswrapper[4884]: E1210 01:23:03.289104 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:23:03 crc kubenswrapper[4884]: I1210 01:23:03.402382 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szpcz" event={"ID":"2dbc3721-42fe-4c6e-b92b-77d261226252","Type":"ContainerStarted","Data":"7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2"} Dec 10 01:23:03 crc kubenswrapper[4884]: I1210 01:23:03.432891 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-szpcz" podStartSLOduration=3.000749125 podStartE2EDuration="5.432871526s" podCreationTimestamp="2025-12-10 01:22:58 +0000 UTC" firstStartedPulling="2025-12-10 01:23:00.37087037 +0000 UTC m=+3153.448827487" lastFinishedPulling="2025-12-10 01:23:02.802992731 +0000 UTC m=+3155.880949888" observedRunningTime="2025-12-10 01:23:03.42594696 +0000 UTC m=+3156.503904107" watchObservedRunningTime="2025-12-10 01:23:03.432871526 +0000 UTC m=+3156.510828643" Dec 10 01:23:09 crc kubenswrapper[4884]: I1210 01:23:09.025901 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-szpcz" Dec 10 01:23:09 crc kubenswrapper[4884]: I1210 01:23:09.026299 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-szpcz" Dec 10 01:23:09 crc kubenswrapper[4884]: I1210 01:23:09.084873 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-szpcz" Dec 10 01:23:09 crc kubenswrapper[4884]: I1210 01:23:09.545902 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-szpcz" Dec 10 01:23:09 crc kubenswrapper[4884]: I1210 01:23:09.618032 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-szpcz"] Dec 10 01:23:11 crc kubenswrapper[4884]: I1210 01:23:11.513019 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-szpcz" podUID="2dbc3721-42fe-4c6e-b92b-77d261226252" containerName="registry-server" containerID="cri-o://7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2" gracePeriod=2 Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.024624 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-szpcz" Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.145281 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rczm\" (UniqueName: \"kubernetes.io/projected/2dbc3721-42fe-4c6e-b92b-77d261226252-kube-api-access-6rczm\") pod \"2dbc3721-42fe-4c6e-b92b-77d261226252\" (UID: \"2dbc3721-42fe-4c6e-b92b-77d261226252\") " Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.145461 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dbc3721-42fe-4c6e-b92b-77d261226252-utilities\") pod \"2dbc3721-42fe-4c6e-b92b-77d261226252\" (UID: \"2dbc3721-42fe-4c6e-b92b-77d261226252\") " Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.145674 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dbc3721-42fe-4c6e-b92b-77d261226252-catalog-content\") pod \"2dbc3721-42fe-4c6e-b92b-77d261226252\" (UID: \"2dbc3721-42fe-4c6e-b92b-77d261226252\") " Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.146341 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2dbc3721-42fe-4c6e-b92b-77d261226252-utilities" (OuterVolumeSpecName: "utilities") pod "2dbc3721-42fe-4c6e-b92b-77d261226252" (UID: "2dbc3721-42fe-4c6e-b92b-77d261226252"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.158616 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dbc3721-42fe-4c6e-b92b-77d261226252-kube-api-access-6rczm" (OuterVolumeSpecName: "kube-api-access-6rczm") pod "2dbc3721-42fe-4c6e-b92b-77d261226252" (UID: "2dbc3721-42fe-4c6e-b92b-77d261226252"). InnerVolumeSpecName "kube-api-access-6rczm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.168838 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2dbc3721-42fe-4c6e-b92b-77d261226252-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2dbc3721-42fe-4c6e-b92b-77d261226252" (UID: "2dbc3721-42fe-4c6e-b92b-77d261226252"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.247498 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2dbc3721-42fe-4c6e-b92b-77d261226252-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.247560 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2dbc3721-42fe-4c6e-b92b-77d261226252-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.247576 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rczm\" (UniqueName: \"kubernetes.io/projected/2dbc3721-42fe-4c6e-b92b-77d261226252-kube-api-access-6rczm\") on node \"crc\" DevicePath \"\"" Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.529321 4884 generic.go:334] "Generic (PLEG): container finished" podID="2dbc3721-42fe-4c6e-b92b-77d261226252" containerID="7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2" exitCode=0 Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.529382 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-szpcz" Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.529402 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szpcz" event={"ID":"2dbc3721-42fe-4c6e-b92b-77d261226252","Type":"ContainerDied","Data":"7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2"} Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.529827 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szpcz" event={"ID":"2dbc3721-42fe-4c6e-b92b-77d261226252","Type":"ContainerDied","Data":"92d1691fb09f17edede3cdeba13f1e96e52a32bf326c8d046a0439a41413d593"} Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.529856 4884 scope.go:117] "RemoveContainer" containerID="7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2" Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.550344 4884 scope.go:117] "RemoveContainer" containerID="0e6d16ffa184b368d4d36f07a09818ecab73d35145b364513794c83d1aa2ffa9" Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.575200 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-szpcz"] Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.584814 4884 scope.go:117] "RemoveContainer" containerID="c5b2438cb69a7e5c4b83d55af64b5dff5198cf19ccdb2b962c4c2ea0f4b6b6b3" Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.585503 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-szpcz"] Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.633863 4884 scope.go:117] "RemoveContainer" containerID="7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2" Dec 10 01:23:12 crc kubenswrapper[4884]: E1210 01:23:12.634346 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2\": container with ID starting with 7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2 not found: ID does not exist" containerID="7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2" Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.634385 4884 
Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.634385 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2"} err="failed to get container status \"7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2\": rpc error: code = NotFound desc = could not find container \"7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2\": container with ID starting with 7edebe55fb3e5b0112504458d2d5e4e9358e467d36942d5afd20167f6a4975a2 not found: ID does not exist"
Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.634411 4884 scope.go:117] "RemoveContainer" containerID="0e6d16ffa184b368d4d36f07a09818ecab73d35145b364513794c83d1aa2ffa9"
Dec 10 01:23:12 crc kubenswrapper[4884]: E1210 01:23:12.634849 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e6d16ffa184b368d4d36f07a09818ecab73d35145b364513794c83d1aa2ffa9\": container with ID starting with 0e6d16ffa184b368d4d36f07a09818ecab73d35145b364513794c83d1aa2ffa9 not found: ID does not exist" containerID="0e6d16ffa184b368d4d36f07a09818ecab73d35145b364513794c83d1aa2ffa9"
Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.634869 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e6d16ffa184b368d4d36f07a09818ecab73d35145b364513794c83d1aa2ffa9"} err="failed to get container status \"0e6d16ffa184b368d4d36f07a09818ecab73d35145b364513794c83d1aa2ffa9\": rpc error: code = NotFound desc = could not find container \"0e6d16ffa184b368d4d36f07a09818ecab73d35145b364513794c83d1aa2ffa9\": container with ID starting with 0e6d16ffa184b368d4d36f07a09818ecab73d35145b364513794c83d1aa2ffa9 not found: ID does not exist"
Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.634883 4884 scope.go:117] "RemoveContainer" containerID="c5b2438cb69a7e5c4b83d55af64b5dff5198cf19ccdb2b962c4c2ea0f4b6b6b3"
Dec 10 01:23:12 crc kubenswrapper[4884]: E1210 01:23:12.635067 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5b2438cb69a7e5c4b83d55af64b5dff5198cf19ccdb2b962c4c2ea0f4b6b6b3\": container with ID starting with c5b2438cb69a7e5c4b83d55af64b5dff5198cf19ccdb2b962c4c2ea0f4b6b6b3 not found: ID does not exist" containerID="c5b2438cb69a7e5c4b83d55af64b5dff5198cf19ccdb2b962c4c2ea0f4b6b6b3"
Dec 10 01:23:12 crc kubenswrapper[4884]: I1210 01:23:12.635086 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5b2438cb69a7e5c4b83d55af64b5dff5198cf19ccdb2b962c4c2ea0f4b6b6b3"} err="failed to get container status \"c5b2438cb69a7e5c4b83d55af64b5dff5198cf19ccdb2b962c4c2ea0f4b6b6b3\": rpc error: code = NotFound desc = could not find container \"c5b2438cb69a7e5c4b83d55af64b5dff5198cf19ccdb2b962c4c2ea0f4b6b6b3\": container with ID starting with c5b2438cb69a7e5c4b83d55af64b5dff5198cf19ccdb2b962c4c2ea0f4b6b6b3 not found: ID does not exist"
Dec 10 01:23:13 crc kubenswrapper[4884]: I1210 01:23:13.287787 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:23:13 crc kubenswrapper[4884]: E1210 01:23:13.288299 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 01:23:13 crc kubenswrapper[4884]: I1210 01:23:13.299772 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dbc3721-42fe-4c6e-b92b-77d261226252" path="/var/lib/kubelet/pods/2dbc3721-42fe-4c6e-b92b-77d261226252/volumes"
Dec 10 01:23:14 crc kubenswrapper[4884]: E1210 01:23:14.289213 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:23:15 crc kubenswrapper[4884]: E1210 01:23:15.290899 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:23:24 crc kubenswrapper[4884]: I1210 01:23:24.698679 4884 generic.go:334] "Generic (PLEG): container finished" podID="71585968-47e5-4291-8945-3af278090bd7" containerID="b5cd34693712e35b2b73aebfd11adc4df3a617cee3edc989121fd3427a36f731" exitCode=2
Dec 10 01:23:24 crc kubenswrapper[4884]: I1210 01:23:24.698743 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn" event={"ID":"71585968-47e5-4291-8945-3af278090bd7","Type":"ContainerDied","Data":"b5cd34693712e35b2b73aebfd11adc4df3a617cee3edc989121fd3427a36f731"}
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.202531 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.287293 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:23:26 crc kubenswrapper[4884]: E1210 01:23:26.287674 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.289512 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-telemetry-combined-ca-bundle\") pod \"71585968-47e5-4291-8945-3af278090bd7\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") "
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.289601 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ssh-key\") pod \"71585968-47e5-4291-8945-3af278090bd7\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") "
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.289670 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-inventory\") pod \"71585968-47e5-4291-8945-3af278090bd7\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") "
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.289867 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsh5r\" (UniqueName: \"kubernetes.io/projected/71585968-47e5-4291-8945-3af278090bd7-kube-api-access-zsh5r\") pod \"71585968-47e5-4291-8945-3af278090bd7\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") "
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.289909 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-1\") pod \"71585968-47e5-4291-8945-3af278090bd7\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") "
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.289979 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-0\") pod \"71585968-47e5-4291-8945-3af278090bd7\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") "
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.290032 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-2\") pod \"71585968-47e5-4291-8945-3af278090bd7\" (UID: \"71585968-47e5-4291-8945-3af278090bd7\") "
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.295735 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71585968-47e5-4291-8945-3af278090bd7-kube-api-access-zsh5r" (OuterVolumeSpecName: "kube-api-access-zsh5r") pod "71585968-47e5-4291-8945-3af278090bd7" (UID: "71585968-47e5-4291-8945-3af278090bd7"). InnerVolumeSpecName "kube-api-access-zsh5r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.299630 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "71585968-47e5-4291-8945-3af278090bd7" (UID: "71585968-47e5-4291-8945-3af278090bd7"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.320276 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-inventory" (OuterVolumeSpecName: "inventory") pod "71585968-47e5-4291-8945-3af278090bd7" (UID: "71585968-47e5-4291-8945-3af278090bd7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.324654 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "71585968-47e5-4291-8945-3af278090bd7" (UID: "71585968-47e5-4291-8945-3af278090bd7"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.330790 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "71585968-47e5-4291-8945-3af278090bd7" (UID: "71585968-47e5-4291-8945-3af278090bd7"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.336593 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "71585968-47e5-4291-8945-3af278090bd7" (UID: "71585968-47e5-4291-8945-3af278090bd7"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.342298 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "71585968-47e5-4291-8945-3af278090bd7" (UID: "71585968-47e5-4291-8945-3af278090bd7"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.393359 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.393392 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-inventory\") on node \"crc\" DevicePath \"\""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.393405 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zsh5r\" (UniqueName: \"kubernetes.io/projected/71585968-47e5-4291-8945-3af278090bd7-kube-api-access-zsh5r\") on node \"crc\" DevicePath \"\""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.393419 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.393447 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.393459 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.393471 4884 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71585968-47e5-4291-8945-3af278090bd7-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.725240 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn" event={"ID":"71585968-47e5-4291-8945-3af278090bd7","Type":"ContainerDied","Data":"c5114fea81a1b0cf8e57c93b6323a5396a1f33ee9c53e8b139f4842b88cd3aeb"}
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.725290 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5114fea81a1b0cf8e57c93b6323a5396a1f33ee9c53e8b139f4842b88cd3aeb"
Dec 10 01:23:26 crc kubenswrapper[4884]: I1210 01:23:26.725329 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn"
Dec 10 01:23:27 crc kubenswrapper[4884]: E1210 01:23:27.300504 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:23:29 crc kubenswrapper[4884]: E1210 01:23:29.304310 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:23:40 crc kubenswrapper[4884]: I1210 01:23:40.287119 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:23:40 crc kubenswrapper[4884]: E1210 01:23:40.287949 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae"
Dec 10 01:23:40 crc kubenswrapper[4884]: E1210 01:23:40.290343 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:23:42 crc kubenswrapper[4884]: E1210 01:23:42.291590 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.644683 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vnmc7"]
Dec 10 01:23:50 crc kubenswrapper[4884]: E1210 01:23:50.645598 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dbc3721-42fe-4c6e-b92b-77d261226252" containerName="extract-utilities"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.645612 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dbc3721-42fe-4c6e-b92b-77d261226252" containerName="extract-utilities"
Dec 10 01:23:50 crc kubenswrapper[4884]: E1210 01:23:50.645624 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71585968-47e5-4291-8945-3af278090bd7" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.645633 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="71585968-47e5-4291-8945-3af278090bd7" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 10 01:23:50 crc kubenswrapper[4884]: E1210 01:23:50.645644 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dbc3721-42fe-4c6e-b92b-77d261226252" containerName="registry-server"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.645650 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dbc3721-42fe-4c6e-b92b-77d261226252" containerName="registry-server"
Dec 10 01:23:50 crc kubenswrapper[4884]: E1210 01:23:50.645692 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dbc3721-42fe-4c6e-b92b-77d261226252" containerName="extract-content"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.645698 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dbc3721-42fe-4c6e-b92b-77d261226252" containerName="extract-content"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.645895 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="71585968-47e5-4291-8945-3af278090bd7" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.645922 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dbc3721-42fe-4c6e-b92b-77d261226252" containerName="registry-server"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.647418 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vnmc7"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.668490 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vnmc7"]
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.763793 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glsqb\" (UniqueName: \"kubernetes.io/projected/28081624-abfc-4960-991e-7ee3b3907690-kube-api-access-glsqb\") pod \"redhat-operators-vnmc7\" (UID: \"28081624-abfc-4960-991e-7ee3b3907690\") " pod="openshift-marketplace/redhat-operators-vnmc7"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.763844 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28081624-abfc-4960-991e-7ee3b3907690-catalog-content\") pod \"redhat-operators-vnmc7\" (UID: \"28081624-abfc-4960-991e-7ee3b3907690\") " pod="openshift-marketplace/redhat-operators-vnmc7"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.763964 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28081624-abfc-4960-991e-7ee3b3907690-utilities\") pod \"redhat-operators-vnmc7\" (UID: \"28081624-abfc-4960-991e-7ee3b3907690\") " pod="openshift-marketplace/redhat-operators-vnmc7"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.866068 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28081624-abfc-4960-991e-7ee3b3907690-utilities\") pod \"redhat-operators-vnmc7\" (UID: \"28081624-abfc-4960-991e-7ee3b3907690\") " pod="openshift-marketplace/redhat-operators-vnmc7"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.866188 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glsqb\" (UniqueName: \"kubernetes.io/projected/28081624-abfc-4960-991e-7ee3b3907690-kube-api-access-glsqb\") pod \"redhat-operators-vnmc7\" (UID: \"28081624-abfc-4960-991e-7ee3b3907690\") " pod="openshift-marketplace/redhat-operators-vnmc7"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.866211 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28081624-abfc-4960-991e-7ee3b3907690-catalog-content\") pod \"redhat-operators-vnmc7\" (UID: \"28081624-abfc-4960-991e-7ee3b3907690\") " pod="openshift-marketplace/redhat-operators-vnmc7"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.866837 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28081624-abfc-4960-991e-7ee3b3907690-catalog-content\") pod \"redhat-operators-vnmc7\" (UID: \"28081624-abfc-4960-991e-7ee3b3907690\") " pod="openshift-marketplace/redhat-operators-vnmc7"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.867066 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28081624-abfc-4960-991e-7ee3b3907690-utilities\") pod \"redhat-operators-vnmc7\" (UID: \"28081624-abfc-4960-991e-7ee3b3907690\") " pod="openshift-marketplace/redhat-operators-vnmc7"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.895131 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glsqb\" (UniqueName: \"kubernetes.io/projected/28081624-abfc-4960-991e-7ee3b3907690-kube-api-access-glsqb\") pod \"redhat-operators-vnmc7\" (UID: \"28081624-abfc-4960-991e-7ee3b3907690\") " pod="openshift-marketplace/redhat-operators-vnmc7"
Dec 10 01:23:50 crc kubenswrapper[4884]: I1210 01:23:50.976572 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vnmc7"
Dec 10 01:23:51 crc kubenswrapper[4884]: I1210 01:23:51.444756 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vnmc7"]
Dec 10 01:23:51 crc kubenswrapper[4884]: W1210 01:23:51.446361 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28081624_abfc_4960_991e_7ee3b3907690.slice/crio-d2c5dc598d173990ea94686df947c38d24e80b1dd52e96cf16a301f18a9741bd WatchSource:0}: Error finding container d2c5dc598d173990ea94686df947c38d24e80b1dd52e96cf16a301f18a9741bd: Status 404 returned error can't find the container with id d2c5dc598d173990ea94686df947c38d24e80b1dd52e96cf16a301f18a9741bd
Dec 10 01:23:52 crc kubenswrapper[4884]: I1210 01:23:52.055155 4884 generic.go:334] "Generic (PLEG): container finished" podID="28081624-abfc-4960-991e-7ee3b3907690" containerID="b57c8b8ab25d7b39f8ea17a5f8214b622902ee68d3cd583c64345675c35b1c7b" exitCode=0
Dec 10 01:23:52 crc kubenswrapper[4884]: I1210 01:23:52.055251 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vnmc7" event={"ID":"28081624-abfc-4960-991e-7ee3b3907690","Type":"ContainerDied","Data":"b57c8b8ab25d7b39f8ea17a5f8214b622902ee68d3cd583c64345675c35b1c7b"}
Dec 10 01:23:52 crc kubenswrapper[4884]: I1210 01:23:52.055418 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vnmc7" event={"ID":"28081624-abfc-4960-991e-7ee3b3907690","Type":"ContainerStarted","Data":"d2c5dc598d173990ea94686df947c38d24e80b1dd52e96cf16a301f18a9741bd"}
Dec 10 01:23:52 crc kubenswrapper[4884]: I1210 01:23:52.287486 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:23:52 crc kubenswrapper[4884]: E1210 01:23:52.287962 4884 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:23:54 crc kubenswrapper[4884]: I1210 01:23:54.079588 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vnmc7" event={"ID":"28081624-abfc-4960-991e-7ee3b3907690","Type":"ContainerStarted","Data":"92e3314310d178edf2eea64f7ff2c2b9ff9b90cd642adf1bcffa63565db5d43d"} Dec 10 01:23:54 crc kubenswrapper[4884]: E1210 01:23:54.288857 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:23:55 crc kubenswrapper[4884]: E1210 01:23:55.290512 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:23:56 crc kubenswrapper[4884]: I1210 01:23:56.106857 4884 generic.go:334] "Generic (PLEG): container finished" podID="28081624-abfc-4960-991e-7ee3b3907690" containerID="92e3314310d178edf2eea64f7ff2c2b9ff9b90cd642adf1bcffa63565db5d43d" exitCode=0 Dec 10 01:23:56 crc kubenswrapper[4884]: I1210 01:23:56.106968 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vnmc7" event={"ID":"28081624-abfc-4960-991e-7ee3b3907690","Type":"ContainerDied","Data":"92e3314310d178edf2eea64f7ff2c2b9ff9b90cd642adf1bcffa63565db5d43d"} Dec 10 01:23:57 crc kubenswrapper[4884]: I1210 01:23:57.120676 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vnmc7" event={"ID":"28081624-abfc-4960-991e-7ee3b3907690","Type":"ContainerStarted","Data":"d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af"} Dec 10 01:23:57 crc kubenswrapper[4884]: I1210 01:23:57.150814 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vnmc7" podStartSLOduration=2.616868711 podStartE2EDuration="7.15079055s" podCreationTimestamp="2025-12-10 01:23:50 +0000 UTC" firstStartedPulling="2025-12-10 01:23:52.057362276 +0000 UTC m=+3205.135319383" lastFinishedPulling="2025-12-10 01:23:56.591284105 +0000 UTC m=+3209.669241222" observedRunningTime="2025-12-10 01:23:57.137988164 +0000 UTC m=+3210.215945291" watchObservedRunningTime="2025-12-10 01:23:57.15079055 +0000 UTC m=+3210.228747667" Dec 10 01:24:00 crc kubenswrapper[4884]: I1210 01:24:00.977068 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vnmc7" Dec 10 01:24:00 crc kubenswrapper[4884]: I1210 01:24:00.977464 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vnmc7" Dec 10 01:24:02 crc kubenswrapper[4884]: I1210 01:24:02.021178 4884 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-marketplace/redhat-operators-vnmc7" podUID="28081624-abfc-4960-991e-7ee3b3907690" containerName="registry-server" probeResult="failure" output=< Dec 10 01:24:02 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 01:24:02 crc kubenswrapper[4884]: > Dec 10 01:24:07 crc kubenswrapper[4884]: I1210 01:24:07.299117 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" Dec 10 01:24:07 crc kubenswrapper[4884]: E1210 01:24:07.300219 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:24:09 crc kubenswrapper[4884]: E1210 01:24:09.291153 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:24:09 crc kubenswrapper[4884]: E1210 01:24:09.291151 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:24:11 crc kubenswrapper[4884]: I1210 01:24:11.059393 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vnmc7" Dec 10 01:24:11 crc kubenswrapper[4884]: I1210 01:24:11.147221 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vnmc7" Dec 10 01:24:11 crc kubenswrapper[4884]: I1210 01:24:11.316109 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vnmc7"] Dec 10 01:24:12 crc kubenswrapper[4884]: I1210 01:24:12.285284 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vnmc7" podUID="28081624-abfc-4960-991e-7ee3b3907690" containerName="registry-server" containerID="cri-o://d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af" gracePeriod=2 Dec 10 01:24:12 crc kubenswrapper[4884]: I1210 01:24:12.935154 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vnmc7" Dec 10 01:24:12 crc kubenswrapper[4884]: I1210 01:24:12.995792 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glsqb\" (UniqueName: \"kubernetes.io/projected/28081624-abfc-4960-991e-7ee3b3907690-kube-api-access-glsqb\") pod \"28081624-abfc-4960-991e-7ee3b3907690\" (UID: \"28081624-abfc-4960-991e-7ee3b3907690\") " Dec 10 01:24:12 crc kubenswrapper[4884]: I1210 01:24:12.995960 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28081624-abfc-4960-991e-7ee3b3907690-utilities\") pod \"28081624-abfc-4960-991e-7ee3b3907690\" (UID: \"28081624-abfc-4960-991e-7ee3b3907690\") " Dec 10 01:24:12 crc kubenswrapper[4884]: I1210 01:24:12.996011 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28081624-abfc-4960-991e-7ee3b3907690-catalog-content\") pod \"28081624-abfc-4960-991e-7ee3b3907690\" (UID: \"28081624-abfc-4960-991e-7ee3b3907690\") " Dec 10 01:24:12 crc kubenswrapper[4884]: I1210 01:24:12.996877 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28081624-abfc-4960-991e-7ee3b3907690-utilities" (OuterVolumeSpecName: "utilities") pod "28081624-abfc-4960-991e-7ee3b3907690" (UID: "28081624-abfc-4960-991e-7ee3b3907690"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.012675 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28081624-abfc-4960-991e-7ee3b3907690-kube-api-access-glsqb" (OuterVolumeSpecName: "kube-api-access-glsqb") pod "28081624-abfc-4960-991e-7ee3b3907690" (UID: "28081624-abfc-4960-991e-7ee3b3907690"). InnerVolumeSpecName "kube-api-access-glsqb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.099412 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glsqb\" (UniqueName: \"kubernetes.io/projected/28081624-abfc-4960-991e-7ee3b3907690-kube-api-access-glsqb\") on node \"crc\" DevicePath \"\"" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.099518 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28081624-abfc-4960-991e-7ee3b3907690-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.113557 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28081624-abfc-4960-991e-7ee3b3907690-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "28081624-abfc-4960-991e-7ee3b3907690" (UID: "28081624-abfc-4960-991e-7ee3b3907690"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.201850 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28081624-abfc-4960-991e-7ee3b3907690-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.306018 4884 generic.go:334] "Generic (PLEG): container finished" podID="28081624-abfc-4960-991e-7ee3b3907690" containerID="d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af" exitCode=0 Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.306147 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vnmc7" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.309015 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vnmc7" event={"ID":"28081624-abfc-4960-991e-7ee3b3907690","Type":"ContainerDied","Data":"d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af"} Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.309059 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vnmc7" event={"ID":"28081624-abfc-4960-991e-7ee3b3907690","Type":"ContainerDied","Data":"d2c5dc598d173990ea94686df947c38d24e80b1dd52e96cf16a301f18a9741bd"} Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.309081 4884 scope.go:117] "RemoveContainer" containerID="d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.363428 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vnmc7"] Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.363760 4884 scope.go:117] "RemoveContainer" containerID="92e3314310d178edf2eea64f7ff2c2b9ff9b90cd642adf1bcffa63565db5d43d" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.375610 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vnmc7"] Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.387482 4884 scope.go:117] "RemoveContainer" containerID="b57c8b8ab25d7b39f8ea17a5f8214b622902ee68d3cd583c64345675c35b1c7b" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.444089 4884 scope.go:117] "RemoveContainer" containerID="d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af" Dec 10 01:24:13 crc kubenswrapper[4884]: E1210 01:24:13.444652 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af\": container with ID starting with d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af not found: ID does not exist" containerID="d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.444710 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af"} err="failed to get container status \"d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af\": rpc error: code = NotFound desc = could not find container \"d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af\": container with ID starting with d7db41eacf36a26ed8378d7998a2e0b77778bf4305100046697f8d070e76d6af not found: ID does not exist" Dec 10 01:24:13 crc 
kubenswrapper[4884]: I1210 01:24:13.444742 4884 scope.go:117] "RemoveContainer" containerID="92e3314310d178edf2eea64f7ff2c2b9ff9b90cd642adf1bcffa63565db5d43d" Dec 10 01:24:13 crc kubenswrapper[4884]: E1210 01:24:13.445189 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92e3314310d178edf2eea64f7ff2c2b9ff9b90cd642adf1bcffa63565db5d43d\": container with ID starting with 92e3314310d178edf2eea64f7ff2c2b9ff9b90cd642adf1bcffa63565db5d43d not found: ID does not exist" containerID="92e3314310d178edf2eea64f7ff2c2b9ff9b90cd642adf1bcffa63565db5d43d" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.445225 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92e3314310d178edf2eea64f7ff2c2b9ff9b90cd642adf1bcffa63565db5d43d"} err="failed to get container status \"92e3314310d178edf2eea64f7ff2c2b9ff9b90cd642adf1bcffa63565db5d43d\": rpc error: code = NotFound desc = could not find container \"92e3314310d178edf2eea64f7ff2c2b9ff9b90cd642adf1bcffa63565db5d43d\": container with ID starting with 92e3314310d178edf2eea64f7ff2c2b9ff9b90cd642adf1bcffa63565db5d43d not found: ID does not exist" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.445248 4884 scope.go:117] "RemoveContainer" containerID="b57c8b8ab25d7b39f8ea17a5f8214b622902ee68d3cd583c64345675c35b1c7b" Dec 10 01:24:13 crc kubenswrapper[4884]: E1210 01:24:13.445786 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b57c8b8ab25d7b39f8ea17a5f8214b622902ee68d3cd583c64345675c35b1c7b\": container with ID starting with b57c8b8ab25d7b39f8ea17a5f8214b622902ee68d3cd583c64345675c35b1c7b not found: ID does not exist" containerID="b57c8b8ab25d7b39f8ea17a5f8214b622902ee68d3cd583c64345675c35b1c7b" Dec 10 01:24:13 crc kubenswrapper[4884]: I1210 01:24:13.445831 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b57c8b8ab25d7b39f8ea17a5f8214b622902ee68d3cd583c64345675c35b1c7b"} err="failed to get container status \"b57c8b8ab25d7b39f8ea17a5f8214b622902ee68d3cd583c64345675c35b1c7b\": rpc error: code = NotFound desc = could not find container \"b57c8b8ab25d7b39f8ea17a5f8214b622902ee68d3cd583c64345675c35b1c7b\": container with ID starting with b57c8b8ab25d7b39f8ea17a5f8214b622902ee68d3cd583c64345675c35b1c7b not found: ID does not exist" Dec 10 01:24:15 crc kubenswrapper[4884]: I1210 01:24:15.311185 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28081624-abfc-4960-991e-7ee3b3907690" path="/var/lib/kubelet/pods/28081624-abfc-4960-991e-7ee3b3907690/volumes" Dec 10 01:24:19 crc kubenswrapper[4884]: I1210 01:24:19.286988 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97" Dec 10 01:24:20 crc kubenswrapper[4884]: I1210 01:24:20.398737 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"1e68ee0e36c463fd39e73cdb3752be5c9fb329079efa100c1ea047c36f426cba"} Dec 10 01:24:21 crc kubenswrapper[4884]: E1210 01:24:21.291280 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" 
pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:24:23 crc kubenswrapper[4884]: E1210 01:24:23.291146 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:24:35 crc kubenswrapper[4884]: E1210 01:24:35.290348 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:24:36 crc kubenswrapper[4884]: E1210 01:24:36.289972 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.036081 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2"] Dec 10 01:24:44 crc kubenswrapper[4884]: E1210 01:24:44.037228 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28081624-abfc-4960-991e-7ee3b3907690" containerName="extract-utilities" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.037245 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="28081624-abfc-4960-991e-7ee3b3907690" containerName="extract-utilities" Dec 10 01:24:44 crc kubenswrapper[4884]: E1210 01:24:44.037293 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28081624-abfc-4960-991e-7ee3b3907690" containerName="registry-server" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.037301 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="28081624-abfc-4960-991e-7ee3b3907690" containerName="registry-server" Dec 10 01:24:44 crc kubenswrapper[4884]: E1210 01:24:44.037314 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28081624-abfc-4960-991e-7ee3b3907690" containerName="extract-content" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.037322 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="28081624-abfc-4960-991e-7ee3b3907690" containerName="extract-content" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.037599 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="28081624-abfc-4960-991e-7ee3b3907690" containerName="registry-server" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.038513 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.041725 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.041977 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.042311 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.042743 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.043298 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.073597 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.073672 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.073737 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.073781 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4q4x\" (UniqueName: \"kubernetes.io/projected/54624723-55f9-4b1e-8d32-66fc274ff7af-kube-api-access-k4q4x\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.073885 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.073920 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" 
(UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.074077 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.080774 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2"] Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.176195 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.176261 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.176317 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.176385 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.176425 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4q4x\" (UniqueName: \"kubernetes.io/projected/54624723-55f9-4b1e-8d32-66fc274ff7af-kube-api-access-k4q4x\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.176618 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.176667 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.183157 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.183207 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.183943 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.184315 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.184564 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.187246 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.206501 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4q4x\" (UniqueName: \"kubernetes.io/projected/54624723-55f9-4b1e-8d32-66fc274ff7af-kube-api-access-k4q4x\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2\" (UID: 
\"54624723-55f9-4b1e-8d32-66fc274ff7af\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:44 crc kubenswrapper[4884]: I1210 01:24:44.370402 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" Dec 10 01:24:45 crc kubenswrapper[4884]: I1210 01:24:45.011421 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2"] Dec 10 01:24:45 crc kubenswrapper[4884]: I1210 01:24:45.738170 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" event={"ID":"54624723-55f9-4b1e-8d32-66fc274ff7af","Type":"ContainerStarted","Data":"1c8e9bea03772881fc2b403cfc8b4cd1266bc9a1d22440a943f05b71f3a8f9d8"} Dec 10 01:24:46 crc kubenswrapper[4884]: I1210 01:24:46.753520 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" event={"ID":"54624723-55f9-4b1e-8d32-66fc274ff7af","Type":"ContainerStarted","Data":"3b43e3fb508d13b3a33656f60d3ebbf2747e25195d84d638c142df161447ff79"} Dec 10 01:24:46 crc kubenswrapper[4884]: I1210 01:24:46.784729 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" podStartSLOduration=2.187094479 podStartE2EDuration="2.784704353s" podCreationTimestamp="2025-12-10 01:24:44 +0000 UTC" firstStartedPulling="2025-12-10 01:24:45.016523878 +0000 UTC m=+3258.094480995" lastFinishedPulling="2025-12-10 01:24:45.614133742 +0000 UTC m=+3258.692090869" observedRunningTime="2025-12-10 01:24:46.778871576 +0000 UTC m=+3259.856828723" watchObservedRunningTime="2025-12-10 01:24:46.784704353 +0000 UTC m=+3259.862661490" Dec 10 01:24:47 crc kubenswrapper[4884]: E1210 01:24:47.309305 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:24:50 crc kubenswrapper[4884]: E1210 01:24:50.290253 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:25:01 crc kubenswrapper[4884]: E1210 01:25:01.290542 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:25:03 crc kubenswrapper[4884]: E1210 01:25:03.290697 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:25:14 crc kubenswrapper[4884]: E1210 01:25:14.290526 4884 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:25:16 crc kubenswrapper[4884]: E1210 01:25:16.289059 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:25:25 crc kubenswrapper[4884]: I1210 01:25:25.291116 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 01:25:25 crc kubenswrapper[4884]: E1210 01:25:25.416320 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:25:25 crc kubenswrapper[4884]: E1210 01:25:25.416376 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:25:25 crc kubenswrapper[4884]: E1210 01:25:25.416529 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:25:25 crc kubenswrapper[4884]: E1210 01:25:25.417694 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:25:27 crc kubenswrapper[4884]: E1210 01:25:27.430674 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:25:27 crc kubenswrapper[4884]: E1210 01:25:27.431361 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:25:27 crc kubenswrapper[4884]: E1210 01:25:27.431617 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest 
current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:25:27 crc kubenswrapper[4884]: E1210 01:25:27.432934 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:25:38 crc kubenswrapper[4884]: E1210 01:25:38.290037 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:25:42 crc kubenswrapper[4884]: I1210 01:25:42.283474 4884 scope.go:117] "RemoveContainer" containerID="e05f72638b11089d54e1dca4cc6ecd9fb2d5cf80578bcce9105b344089f1960d" Dec 10 01:25:42 crc kubenswrapper[4884]: E1210 01:25:42.288120 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:25:42 crc kubenswrapper[4884]: I1210 01:25:42.332320 4884 scope.go:117] "RemoveContainer" containerID="a2ef8fe2918c00d9a5b88cdde541c1a65aa204f86b93f97c24e24322a782dcf0" Dec 10 01:25:42 crc kubenswrapper[4884]: I1210 01:25:42.384966 4884 scope.go:117] "RemoveContainer" containerID="f8cac88b76d5d7d380dd53e80b8d12b6580342e060def546bd4975340e086e8f" Dec 10 01:25:45 crc kubenswrapper[4884]: I1210 01:25:45.544844 4884 generic.go:334] "Generic (PLEG): container finished" podID="54624723-55f9-4b1e-8d32-66fc274ff7af" containerID="3b43e3fb508d13b3a33656f60d3ebbf2747e25195d84d638c142df161447ff79" exitCode=2 Dec 10 01:25:45 crc kubenswrapper[4884]: I1210 01:25:45.544947 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" event={"ID":"54624723-55f9-4b1e-8d32-66fc274ff7af","Type":"ContainerDied","Data":"3b43e3fb508d13b3a33656f60d3ebbf2747e25195d84d638c142df161447ff79"} Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.155696 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2"
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.270100 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-inventory\") pod \"54624723-55f9-4b1e-8d32-66fc274ff7af\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") "
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.270192 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4q4x\" (UniqueName: \"kubernetes.io/projected/54624723-55f9-4b1e-8d32-66fc274ff7af-kube-api-access-k4q4x\") pod \"54624723-55f9-4b1e-8d32-66fc274ff7af\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") "
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.270237 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-2\") pod \"54624723-55f9-4b1e-8d32-66fc274ff7af\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") "
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.270274 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-1\") pod \"54624723-55f9-4b1e-8d32-66fc274ff7af\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") "
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.270298 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-0\") pod \"54624723-55f9-4b1e-8d32-66fc274ff7af\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") "
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.270353 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ssh-key\") pod \"54624723-55f9-4b1e-8d32-66fc274ff7af\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") "
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.270422 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-telemetry-combined-ca-bundle\") pod \"54624723-55f9-4b1e-8d32-66fc274ff7af\" (UID: \"54624723-55f9-4b1e-8d32-66fc274ff7af\") "
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.278614 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "54624723-55f9-4b1e-8d32-66fc274ff7af" (UID: "54624723-55f9-4b1e-8d32-66fc274ff7af"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.280320 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54624723-55f9-4b1e-8d32-66fc274ff7af-kube-api-access-k4q4x" (OuterVolumeSpecName: "kube-api-access-k4q4x") pod "54624723-55f9-4b1e-8d32-66fc274ff7af" (UID: "54624723-55f9-4b1e-8d32-66fc274ff7af"). InnerVolumeSpecName "kube-api-access-k4q4x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.309740 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "54624723-55f9-4b1e-8d32-66fc274ff7af" (UID: "54624723-55f9-4b1e-8d32-66fc274ff7af"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.320089 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "54624723-55f9-4b1e-8d32-66fc274ff7af" (UID: "54624723-55f9-4b1e-8d32-66fc274ff7af"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.320506 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-inventory" (OuterVolumeSpecName: "inventory") pod "54624723-55f9-4b1e-8d32-66fc274ff7af" (UID: "54624723-55f9-4b1e-8d32-66fc274ff7af"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.327239 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "54624723-55f9-4b1e-8d32-66fc274ff7af" (UID: "54624723-55f9-4b1e-8d32-66fc274ff7af"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.344163 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "54624723-55f9-4b1e-8d32-66fc274ff7af" (UID: "54624723-55f9-4b1e-8d32-66fc274ff7af"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.373390 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.373462 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.373497 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.373511 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.373527 4884 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.373541 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/54624723-55f9-4b1e-8d32-66fc274ff7af-inventory\") on node \"crc\" DevicePath \"\""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.373553 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4q4x\" (UniqueName: \"kubernetes.io/projected/54624723-55f9-4b1e-8d32-66fc274ff7af-kube-api-access-k4q4x\") on node \"crc\" DevicePath \"\""
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.576888 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2" event={"ID":"54624723-55f9-4b1e-8d32-66fc274ff7af","Type":"ContainerDied","Data":"1c8e9bea03772881fc2b403cfc8b4cd1266bc9a1d22440a943f05b71f3a8f9d8"}
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.577561 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c8e9bea03772881fc2b403cfc8b4cd1266bc9a1d22440a943f05b71f3a8f9d8"
Dec 10 01:25:47 crc kubenswrapper[4884]: I1210 01:25:47.576926 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.579888 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7gmq6"]
Dec 10 01:25:51 crc kubenswrapper[4884]: E1210 01:25:51.583451 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54624723-55f9-4b1e-8d32-66fc274ff7af" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.583483 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="54624723-55f9-4b1e-8d32-66fc274ff7af" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.583798 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="54624723-55f9-4b1e-8d32-66fc274ff7af" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.585622 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.602233 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7gmq6"]
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.667064 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c60690-8f74-4ebf-921f-73902e8cf036-catalog-content\") pod \"certified-operators-7gmq6\" (UID: \"06c60690-8f74-4ebf-921f-73902e8cf036\") " pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.667204 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c60690-8f74-4ebf-921f-73902e8cf036-utilities\") pod \"certified-operators-7gmq6\" (UID: \"06c60690-8f74-4ebf-921f-73902e8cf036\") " pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.667237 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkvzp\" (UniqueName: \"kubernetes.io/projected/06c60690-8f74-4ebf-921f-73902e8cf036-kube-api-access-nkvzp\") pod \"certified-operators-7gmq6\" (UID: \"06c60690-8f74-4ebf-921f-73902e8cf036\") " pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.769226 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c60690-8f74-4ebf-921f-73902e8cf036-utilities\") pod \"certified-operators-7gmq6\" (UID: \"06c60690-8f74-4ebf-921f-73902e8cf036\") " pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.769293 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkvzp\" (UniqueName: \"kubernetes.io/projected/06c60690-8f74-4ebf-921f-73902e8cf036-kube-api-access-nkvzp\") pod \"certified-operators-7gmq6\" (UID: \"06c60690-8f74-4ebf-921f-73902e8cf036\") " pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.769493 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c60690-8f74-4ebf-921f-73902e8cf036-catalog-content\") pod \"certified-operators-7gmq6\" (UID: \"06c60690-8f74-4ebf-921f-73902e8cf036\") " pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.769911 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c60690-8f74-4ebf-921f-73902e8cf036-utilities\") pod \"certified-operators-7gmq6\" (UID: \"06c60690-8f74-4ebf-921f-73902e8cf036\") " pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.769987 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c60690-8f74-4ebf-921f-73902e8cf036-catalog-content\") pod \"certified-operators-7gmq6\" (UID: \"06c60690-8f74-4ebf-921f-73902e8cf036\") " pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.794577 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkvzp\" (UniqueName: \"kubernetes.io/projected/06c60690-8f74-4ebf-921f-73902e8cf036-kube-api-access-nkvzp\") pod \"certified-operators-7gmq6\" (UID: \"06c60690-8f74-4ebf-921f-73902e8cf036\") " pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:25:51 crc kubenswrapper[4884]: I1210 01:25:51.919926 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:25:52 crc kubenswrapper[4884]: E1210 01:25:52.292789 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:25:52 crc kubenswrapper[4884]: I1210 01:25:52.319581 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7gmq6"]
Dec 10 01:25:52 crc kubenswrapper[4884]: I1210 01:25:52.633952 4884 generic.go:334] "Generic (PLEG): container finished" podID="06c60690-8f74-4ebf-921f-73902e8cf036" containerID="107d6a5c6c3aeb03a2d4717992150ae7a2d408a5e350968a610ce6b7596e9240" exitCode=0
Dec 10 01:25:52 crc kubenswrapper[4884]: I1210 01:25:52.634048 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7gmq6" event={"ID":"06c60690-8f74-4ebf-921f-73902e8cf036","Type":"ContainerDied","Data":"107d6a5c6c3aeb03a2d4717992150ae7a2d408a5e350968a610ce6b7596e9240"}
Dec 10 01:25:52 crc kubenswrapper[4884]: I1210 01:25:52.635233 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7gmq6" event={"ID":"06c60690-8f74-4ebf-921f-73902e8cf036","Type":"ContainerStarted","Data":"15f7f6503897b31384c7aab4151ea7d966b8ea8b6c1a3632cffef2af950f9d55"}
Dec 10 01:25:54 crc kubenswrapper[4884]: I1210 01:25:54.670593 4884 generic.go:334] "Generic (PLEG): container finished" podID="06c60690-8f74-4ebf-921f-73902e8cf036" containerID="11486516eb623a83a8790e3b1690095e402b2afbd5bdc7584bb2dfc60085ef0f" exitCode=0
Dec 10 01:25:54 crc kubenswrapper[4884]: I1210 01:25:54.670663 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7gmq6" event={"ID":"06c60690-8f74-4ebf-921f-73902e8cf036","Type":"ContainerDied","Data":"11486516eb623a83a8790e3b1690095e402b2afbd5bdc7584bb2dfc60085ef0f"}
Dec 10 01:25:55 crc kubenswrapper[4884]: E1210 01:25:55.290226 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:25:55 crc kubenswrapper[4884]: I1210 01:25:55.684708 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7gmq6" event={"ID":"06c60690-8f74-4ebf-921f-73902e8cf036","Type":"ContainerStarted","Data":"95df6d945b891aaa6f7368898ad66b0cead8f62716cdee53c3b6996d3bc76700"}
Dec 10 01:25:55 crc kubenswrapper[4884]: I1210 01:25:55.718941 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7gmq6" podStartSLOduration=2.261793385 podStartE2EDuration="4.718921761s" podCreationTimestamp="2025-12-10 01:25:51 +0000 UTC" firstStartedPulling="2025-12-10 01:25:52.635955368 +0000 UTC m=+3325.713912485" lastFinishedPulling="2025-12-10 01:25:55.093083734 +0000 UTC m=+3328.171040861" observedRunningTime="2025-12-10 01:25:55.709539717 +0000 UTC m=+3328.787505544" watchObservedRunningTime="2025-12-10 01:25:55.718921761 +0000 UTC m=+3328.796878868"
Dec 10 01:26:01 crc kubenswrapper[4884]: I1210 01:26:01.920934 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:26:01 crc kubenswrapper[4884]: I1210 01:26:01.922179 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:26:01 crc kubenswrapper[4884]: I1210 01:26:01.995364 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:26:02 crc kubenswrapper[4884]: I1210 01:26:02.862194 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:26:02 crc kubenswrapper[4884]: I1210 01:26:02.934509 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7gmq6"]
Dec 10 01:26:04 crc kubenswrapper[4884]: I1210 01:26:04.794339 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7gmq6" podUID="06c60690-8f74-4ebf-921f-73902e8cf036" containerName="registry-server" containerID="cri-o://95df6d945b891aaa6f7368898ad66b0cead8f62716cdee53c3b6996d3bc76700" gracePeriod=2
Dec 10 01:26:05 crc kubenswrapper[4884]: E1210 01:26:05.294296 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:26:05 crc kubenswrapper[4884]: I1210 01:26:05.806642 4884 generic.go:334] "Generic (PLEG): container finished" podID="06c60690-8f74-4ebf-921f-73902e8cf036" containerID="95df6d945b891aaa6f7368898ad66b0cead8f62716cdee53c3b6996d3bc76700" exitCode=0
Dec 10 01:26:05 crc kubenswrapper[4884]: I1210 01:26:05.806952 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7gmq6" event={"ID":"06c60690-8f74-4ebf-921f-73902e8cf036","Type":"ContainerDied","Data":"95df6d945b891aaa6f7368898ad66b0cead8f62716cdee53c3b6996d3bc76700"}
Dec 10 01:26:05 crc kubenswrapper[4884]: I1210 01:26:05.806981 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7gmq6" event={"ID":"06c60690-8f74-4ebf-921f-73902e8cf036","Type":"ContainerDied","Data":"15f7f6503897b31384c7aab4151ea7d966b8ea8b6c1a3632cffef2af950f9d55"}
Dec 10 01:26:05 crc kubenswrapper[4884]: I1210 01:26:05.807003 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15f7f6503897b31384c7aab4151ea7d966b8ea8b6c1a3632cffef2af950f9d55"
Dec 10 01:26:05 crc kubenswrapper[4884]: I1210 01:26:05.823708 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:26:05 crc kubenswrapper[4884]: I1210 01:26:05.910749 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c60690-8f74-4ebf-921f-73902e8cf036-utilities\") pod \"06c60690-8f74-4ebf-921f-73902e8cf036\" (UID: \"06c60690-8f74-4ebf-921f-73902e8cf036\") "
Dec 10 01:26:05 crc kubenswrapper[4884]: I1210 01:26:05.910813 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkvzp\" (UniqueName: \"kubernetes.io/projected/06c60690-8f74-4ebf-921f-73902e8cf036-kube-api-access-nkvzp\") pod \"06c60690-8f74-4ebf-921f-73902e8cf036\" (UID: \"06c60690-8f74-4ebf-921f-73902e8cf036\") "
Dec 10 01:26:05 crc kubenswrapper[4884]: I1210 01:26:05.911205 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c60690-8f74-4ebf-921f-73902e8cf036-catalog-content\") pod \"06c60690-8f74-4ebf-921f-73902e8cf036\" (UID: \"06c60690-8f74-4ebf-921f-73902e8cf036\") "
Dec 10 01:26:05 crc kubenswrapper[4884]: I1210 01:26:05.911792 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06c60690-8f74-4ebf-921f-73902e8cf036-utilities" (OuterVolumeSpecName: "utilities") pod "06c60690-8f74-4ebf-921f-73902e8cf036" (UID: "06c60690-8f74-4ebf-921f-73902e8cf036"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 01:26:05 crc kubenswrapper[4884]: I1210 01:26:05.917766 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06c60690-8f74-4ebf-921f-73902e8cf036-kube-api-access-nkvzp" (OuterVolumeSpecName: "kube-api-access-nkvzp") pod "06c60690-8f74-4ebf-921f-73902e8cf036" (UID: "06c60690-8f74-4ebf-921f-73902e8cf036"). InnerVolumeSpecName "kube-api-access-nkvzp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 01:26:05 crc kubenswrapper[4884]: I1210 01:26:05.954336 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06c60690-8f74-4ebf-921f-73902e8cf036-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "06c60690-8f74-4ebf-921f-73902e8cf036" (UID: "06c60690-8f74-4ebf-921f-73902e8cf036"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 10 01:26:06 crc kubenswrapper[4884]: I1210 01:26:06.012751 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/06c60690-8f74-4ebf-921f-73902e8cf036-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 10 01:26:06 crc kubenswrapper[4884]: I1210 01:26:06.012780 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/06c60690-8f74-4ebf-921f-73902e8cf036-utilities\") on node \"crc\" DevicePath \"\""
Dec 10 01:26:06 crc kubenswrapper[4884]: I1210 01:26:06.012791 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkvzp\" (UniqueName: \"kubernetes.io/projected/06c60690-8f74-4ebf-921f-73902e8cf036-kube-api-access-nkvzp\") on node \"crc\" DevicePath \"\""
Dec 10 01:26:06 crc kubenswrapper[4884]: I1210 01:26:06.815769 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7gmq6"
Dec 10 01:26:06 crc kubenswrapper[4884]: I1210 01:26:06.853629 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7gmq6"]
Dec 10 01:26:06 crc kubenswrapper[4884]: I1210 01:26:06.862541 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7gmq6"]
Dec 10 01:26:07 crc kubenswrapper[4884]: I1210 01:26:07.308950 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06c60690-8f74-4ebf-921f-73902e8cf036" path="/var/lib/kubelet/pods/06c60690-8f74-4ebf-921f-73902e8cf036/volumes"
Dec 10 01:26:10 crc kubenswrapper[4884]: E1210 01:26:10.290612 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:26:17 crc kubenswrapper[4884]: E1210 01:26:17.311629 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:26:21 crc kubenswrapper[4884]: E1210 01:26:21.290677 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:26:32 crc kubenswrapper[4884]: E1210 01:26:32.291185 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:26:34 crc kubenswrapper[4884]: E1210 01:26:34.296228 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:26:45 crc kubenswrapper[4884]: E1210 01:26:45.291288 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:26:46 crc kubenswrapper[4884]: E1210 01:26:46.292126 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:26:48 crc kubenswrapper[4884]: I1210 01:26:48.098724 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 10 01:26:48 crc kubenswrapper[4884]: I1210 01:26:48.099506 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 10 01:26:57 crc kubenswrapper[4884]: E1210 01:26:57.303628 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:26:58 crc kubenswrapper[4884]: E1210 01:26:58.292007 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:27:11 crc kubenswrapper[4884]: E1210 01:27:11.289893 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:27:12 crc kubenswrapper[4884]: E1210 01:27:12.289178 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:27:18 crc kubenswrapper[4884]: I1210 01:27:18.099103 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 10 01:27:18 crc kubenswrapper[4884]: I1210 01:27:18.099881 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 10 01:27:22 crc kubenswrapper[4884]: E1210 01:27:22.290356 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:27:24 crc kubenswrapper[4884]: E1210 01:27:24.289407 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:27:33 crc kubenswrapper[4884]: E1210 01:27:33.291946 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:27:37 crc kubenswrapper[4884]: E1210 01:27:37.325809 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:27:45 crc kubenswrapper[4884]: E1210 01:27:45.293580 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:27:48 crc kubenswrapper[4884]: I1210 01:27:48.098802 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 10 01:27:48 crc kubenswrapper[4884]: I1210 01:27:48.099257 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 10 01:27:48 crc kubenswrapper[4884]: I1210 01:27:48.099299 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx"
Dec 10 01:27:48 crc kubenswrapper[4884]: I1210 01:27:48.099993 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1e68ee0e36c463fd39e73cdb3752be5c9fb329079efa100c1ea047c36f426cba"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 10 01:27:48 crc kubenswrapper[4884]: I1210 01:27:48.100043 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://1e68ee0e36c463fd39e73cdb3752be5c9fb329079efa100c1ea047c36f426cba" gracePeriod=600
Dec 10 01:27:48 crc kubenswrapper[4884]: E1210 01:27:48.288660 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:27:48 crc kubenswrapper[4884]: I1210 01:27:48.338104 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="1e68ee0e36c463fd39e73cdb3752be5c9fb329079efa100c1ea047c36f426cba" exitCode=0
Dec 10 01:27:48 crc kubenswrapper[4884]: I1210 01:27:48.338161 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"1e68ee0e36c463fd39e73cdb3752be5c9fb329079efa100c1ea047c36f426cba"}
Dec 10 01:27:48 crc kubenswrapper[4884]: I1210 01:27:48.338212 4884 scope.go:117] "RemoveContainer" containerID="bf363ce87ab3ce58c51ed6348dc45276efa30d1075dbc2cf0eaf0a942659bf97"
Dec 10 01:27:49 crc kubenswrapper[4884]: I1210 01:27:49.355529 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f"}
Dec 10 01:27:56 crc kubenswrapper[4884]: E1210 01:27:56.290885 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:28:00 crc kubenswrapper[4884]: E1210 01:28:00.297804 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:28:07 crc kubenswrapper[4884]: E1210 01:28:07.302490 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:28:16 crc kubenswrapper[4884]: E1210 01:28:16.290342 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:28:20 crc kubenswrapper[4884]: E1210 01:28:20.290822 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.038987 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"]
Dec 10 01:28:25 crc kubenswrapper[4884]: E1210 01:28:25.039972 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c60690-8f74-4ebf-921f-73902e8cf036" containerName="extract-content"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.039989 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c60690-8f74-4ebf-921f-73902e8cf036" containerName="extract-content"
Dec 10 01:28:25 crc kubenswrapper[4884]: E1210 01:28:25.040009 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c60690-8f74-4ebf-921f-73902e8cf036" containerName="registry-server"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.040017 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c60690-8f74-4ebf-921f-73902e8cf036" containerName="registry-server"
Dec 10 01:28:25 crc kubenswrapper[4884]: E1210 01:28:25.040051 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06c60690-8f74-4ebf-921f-73902e8cf036" containerName="extract-utilities"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.040061 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="06c60690-8f74-4ebf-921f-73902e8cf036" containerName="extract-utilities"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.040330 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="06c60690-8f74-4ebf-921f-73902e8cf036" containerName="registry-server"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.041331 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.049040 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.049127 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"]
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.049320 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.049335 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.049662 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.049794 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.222656 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.222728 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.222768 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.222792 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.222816 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.222893 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.222916 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-274mc\" (UniqueName: \"kubernetes.io/projected/e2755ba3-53ef-4f12-91cf-1c90cde36fab-kube-api-access-274mc\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.324643 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.324697 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-274mc\" (UniqueName: \"kubernetes.io/projected/e2755ba3-53ef-4f12-91cf-1c90cde36fab-kube-api-access-274mc\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.324785 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.324845 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.324893 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.324927 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.324973 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.330596 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.330700 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.331058 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.331345 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.331770 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.331866 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.345854 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-274mc\" (UniqueName: \"kubernetes.io/projected/e2755ba3-53ef-4f12-91cf-1c90cde36fab-kube-api-access-274mc\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:25 crc kubenswrapper[4884]: I1210 01:28:25.375214 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:28:26 crc kubenswrapper[4884]: I1210 01:28:26.010266 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"]
Dec 10 01:28:26 crc kubenswrapper[4884]: I1210 01:28:26.781113 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz" event={"ID":"e2755ba3-53ef-4f12-91cf-1c90cde36fab","Type":"ContainerStarted","Data":"52f9d3486aaed7bd31e6313f7e0afa24e5b1ab1bc47e42e2be6829181746434b"}
Dec 10 01:28:27 crc kubenswrapper[4884]: I1210 01:28:27.797425 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz" event={"ID":"e2755ba3-53ef-4f12-91cf-1c90cde36fab","Type":"ContainerStarted","Data":"978cb9347fee61eed3438b474cbef631d460a90b0134500031bd1afb0fe13b30"}
Dec 10 01:28:27 crc kubenswrapper[4884]: I1210 01:28:27.839142 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz" podStartSLOduration=2.294180254 podStartE2EDuration="2.839121075s" podCreationTimestamp="2025-12-10 01:28:25 +0000 UTC" firstStartedPulling="2025-12-10 01:28:25.998285777 +0000 UTC m=+3479.076242894" lastFinishedPulling="2025-12-10 01:28:26.543226558 +0000 UTC m=+3479.621183715" observedRunningTime="2025-12-10 01:28:27.82192494 +0000 UTC m=+3480.899882097" watchObservedRunningTime="2025-12-10 01:28:27.839121075 +0000 UTC m=+3480.917078202"
Dec 10 01:28:31 crc kubenswrapper[4884]: E1210 01:28:31.289484 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:28:32 crc kubenswrapper[4884]: E1210 01:28:32.291816 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:28:42 crc kubenswrapper[4884]: E1210 01:28:42.304307 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:28:43 crc kubenswrapper[4884]: E1210 01:28:43.288794 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:28:55 crc kubenswrapper[4884]: E1210 01:28:55.290970 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:28:56 crc kubenswrapper[4884]: E1210 01:28:56.288508 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:29:09 crc kubenswrapper[4884]: E1210 01:29:09.290624 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:29:11 crc kubenswrapper[4884]: E1210 01:29:11.290370 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:29:23 crc kubenswrapper[4884]: E1210 01:29:23.291640 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:29:25 crc kubenswrapper[4884]: I1210 01:29:25.556452 4884 generic.go:334] "Generic (PLEG): container finished" podID="e2755ba3-53ef-4f12-91cf-1c90cde36fab" containerID="978cb9347fee61eed3438b474cbef631d460a90b0134500031bd1afb0fe13b30" exitCode=2
Dec 10 01:29:25 crc kubenswrapper[4884]: I1210 01:29:25.556524 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz" event={"ID":"e2755ba3-53ef-4f12-91cf-1c90cde36fab","Type":"ContainerDied","Data":"978cb9347fee61eed3438b474cbef631d460a90b0134500031bd1afb0fe13b30"}
Dec 10 01:29:26 crc kubenswrapper[4884]: E1210 01:29:26.291730 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.100276 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.166698 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-1\") pod \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") "
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.166754 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-0\") pod \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") "
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.166785 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-telemetry-combined-ca-bundle\") pod \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") "
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.166806 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-2\") pod \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") "
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.166914 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ssh-key\") pod \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") "
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.167090 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-274mc\" (UniqueName: \"kubernetes.io/projected/e2755ba3-53ef-4f12-91cf-1c90cde36fab-kube-api-access-274mc\") pod \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") "
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.167169 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-inventory\") pod \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\" (UID: \"e2755ba3-53ef-4f12-91cf-1c90cde36fab\") "
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.174734 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2755ba3-53ef-4f12-91cf-1c90cde36fab-kube-api-access-274mc" (OuterVolumeSpecName: "kube-api-access-274mc") pod "e2755ba3-53ef-4f12-91cf-1c90cde36fab" (UID: "e2755ba3-53ef-4f12-91cf-1c90cde36fab"). InnerVolumeSpecName "kube-api-access-274mc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.175034 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "e2755ba3-53ef-4f12-91cf-1c90cde36fab" (UID: "e2755ba3-53ef-4f12-91cf-1c90cde36fab"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.199553 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "e2755ba3-53ef-4f12-91cf-1c90cde36fab" (UID: "e2755ba3-53ef-4f12-91cf-1c90cde36fab"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.200183 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-inventory" (OuterVolumeSpecName: "inventory") pod "e2755ba3-53ef-4f12-91cf-1c90cde36fab" (UID: "e2755ba3-53ef-4f12-91cf-1c90cde36fab"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.213753 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "e2755ba3-53ef-4f12-91cf-1c90cde36fab" (UID: "e2755ba3-53ef-4f12-91cf-1c90cde36fab"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.215985 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e2755ba3-53ef-4f12-91cf-1c90cde36fab" (UID: "e2755ba3-53ef-4f12-91cf-1c90cde36fab"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.232866 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "e2755ba3-53ef-4f12-91cf-1c90cde36fab" (UID: "e2755ba3-53ef-4f12-91cf-1c90cde36fab"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.270107 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-inventory\") on node \"crc\" DevicePath \"\""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.270165 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.270181 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.270195 4884 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.270209 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.270221 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e2755ba3-53ef-4f12-91cf-1c90cde36fab-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.270238 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-274mc\" (UniqueName: \"kubernetes.io/projected/e2755ba3-53ef-4f12-91cf-1c90cde36fab-kube-api-access-274mc\") on node \"crc\" DevicePath \"\""
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.587053 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz" event={"ID":"e2755ba3-53ef-4f12-91cf-1c90cde36fab","Type":"ContainerDied","Data":"52f9d3486aaed7bd31e6313f7e0afa24e5b1ab1bc47e42e2be6829181746434b"}
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.587111 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52f9d3486aaed7bd31e6313f7e0afa24e5b1ab1bc47e42e2be6829181746434b"
Dec 10 01:29:27 crc kubenswrapper[4884]: I1210 01:29:27.587143 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz"
Dec 10 01:29:34 crc kubenswrapper[4884]: E1210 01:29:34.290245 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:29:38 crc kubenswrapper[4884]: E1210 01:29:38.291286 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7"
Dec 10 01:29:46 crc kubenswrapper[4884]: E1210 01:29:46.310904 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.098806 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.100650 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.194502 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pjjj8"]
Dec 10 01:29:48 crc kubenswrapper[4884]: E1210 01:29:48.195013 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2755ba3-53ef-4f12-91cf-1c90cde36fab" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.195036 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2755ba3-53ef-4f12-91cf-1c90cde36fab" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.195339 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2755ba3-53ef-4f12-91cf-1c90cde36fab" containerName="telemetry-edpm-deployment-openstack-edpm-ipam"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.197221 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pjjj8"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.219200 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pjjj8"]
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.226297 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f44067d-d377-4166-a60e-ca7a33268462-utilities\") pod \"community-operators-pjjj8\" (UID: \"5f44067d-d377-4166-a60e-ca7a33268462\") " pod="openshift-marketplace/community-operators-pjjj8"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.226351 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdxft\" (UniqueName: \"kubernetes.io/projected/5f44067d-d377-4166-a60e-ca7a33268462-kube-api-access-vdxft\") pod \"community-operators-pjjj8\" (UID: \"5f44067d-d377-4166-a60e-ca7a33268462\") " pod="openshift-marketplace/community-operators-pjjj8"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.226381 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f44067d-d377-4166-a60e-ca7a33268462-catalog-content\") pod \"community-operators-pjjj8\" (UID: \"5f44067d-d377-4166-a60e-ca7a33268462\") " pod="openshift-marketplace/community-operators-pjjj8"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.328425 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f44067d-d377-4166-a60e-ca7a33268462-utilities\") pod \"community-operators-pjjj8\" (UID: \"5f44067d-d377-4166-a60e-ca7a33268462\") " pod="openshift-marketplace/community-operators-pjjj8"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.328501 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdxft\" (UniqueName: \"kubernetes.io/projected/5f44067d-d377-4166-a60e-ca7a33268462-kube-api-access-vdxft\") pod \"community-operators-pjjj8\" (UID: \"5f44067d-d377-4166-a60e-ca7a33268462\") " pod="openshift-marketplace/community-operators-pjjj8"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.328535 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f44067d-d377-4166-a60e-ca7a33268462-catalog-content\") pod \"community-operators-pjjj8\" (UID: \"5f44067d-d377-4166-a60e-ca7a33268462\") " pod="openshift-marketplace/community-operators-pjjj8"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.329028 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f44067d-d377-4166-a60e-ca7a33268462-utilities\") pod \"community-operators-pjjj8\" (UID: \"5f44067d-d377-4166-a60e-ca7a33268462\") " pod="openshift-marketplace/community-operators-pjjj8"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.329046 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f44067d-d377-4166-a60e-ca7a33268462-catalog-content\") pod \"community-operators-pjjj8\" (UID: \"5f44067d-d377-4166-a60e-ca7a33268462\") " pod="openshift-marketplace/community-operators-pjjj8"
Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.355360 4884 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"kube-api-access-vdxft\" (UniqueName: \"kubernetes.io/projected/5f44067d-d377-4166-a60e-ca7a33268462-kube-api-access-vdxft\") pod \"community-operators-pjjj8\" (UID: \"5f44067d-d377-4166-a60e-ca7a33268462\") " pod="openshift-marketplace/community-operators-pjjj8" Dec 10 01:29:48 crc kubenswrapper[4884]: I1210 01:29:48.525718 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pjjj8" Dec 10 01:29:49 crc kubenswrapper[4884]: I1210 01:29:49.071168 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pjjj8"] Dec 10 01:29:49 crc kubenswrapper[4884]: I1210 01:29:49.849347 4884 generic.go:334] "Generic (PLEG): container finished" podID="5f44067d-d377-4166-a60e-ca7a33268462" containerID="bc0a9b73e5e176265119fcf461e49aabda2a39afe1c3b80e909fb67d9a8c9b43" exitCode=0 Dec 10 01:29:49 crc kubenswrapper[4884]: I1210 01:29:49.849535 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pjjj8" event={"ID":"5f44067d-d377-4166-a60e-ca7a33268462","Type":"ContainerDied","Data":"bc0a9b73e5e176265119fcf461e49aabda2a39afe1c3b80e909fb67d9a8c9b43"} Dec 10 01:29:49 crc kubenswrapper[4884]: I1210 01:29:49.849729 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pjjj8" event={"ID":"5f44067d-d377-4166-a60e-ca7a33268462","Type":"ContainerStarted","Data":"d4ea23c3a2899d04d79251a129987e05ba0e8918e93e4c54833d1719ca790e57"} Dec 10 01:29:50 crc kubenswrapper[4884]: I1210 01:29:50.862245 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pjjj8" event={"ID":"5f44067d-d377-4166-a60e-ca7a33268462","Type":"ContainerStarted","Data":"a76634a9d266b5939bb268b4c0e1335e3be1760802c8de5c00a61d2307ad1bdb"} Dec 10 01:29:51 crc kubenswrapper[4884]: I1210 01:29:51.874286 4884 generic.go:334] "Generic (PLEG): container finished" podID="5f44067d-d377-4166-a60e-ca7a33268462" containerID="a76634a9d266b5939bb268b4c0e1335e3be1760802c8de5c00a61d2307ad1bdb" exitCode=0 Dec 10 01:29:51 crc kubenswrapper[4884]: I1210 01:29:51.874344 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pjjj8" event={"ID":"5f44067d-d377-4166-a60e-ca7a33268462","Type":"ContainerDied","Data":"a76634a9d266b5939bb268b4c0e1335e3be1760802c8de5c00a61d2307ad1bdb"} Dec 10 01:29:52 crc kubenswrapper[4884]: E1210 01:29:52.289970 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:29:52 crc kubenswrapper[4884]: I1210 01:29:52.886556 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pjjj8" event={"ID":"5f44067d-d377-4166-a60e-ca7a33268462","Type":"ContainerStarted","Data":"8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564"} Dec 10 01:29:52 crc kubenswrapper[4884]: I1210 01:29:52.908323 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pjjj8" podStartSLOduration=2.439613136 podStartE2EDuration="4.908308855s" podCreationTimestamp="2025-12-10 01:29:48 +0000 UTC" firstStartedPulling="2025-12-10 01:29:49.851966386 
+0000 UTC m=+3562.929923543" lastFinishedPulling="2025-12-10 01:29:52.320662135 +0000 UTC m=+3565.398619262" observedRunningTime="2025-12-10 01:29:52.904318468 +0000 UTC m=+3565.982275595" watchObservedRunningTime="2025-12-10 01:29:52.908308855 +0000 UTC m=+3565.986265972" Dec 10 01:29:58 crc kubenswrapper[4884]: I1210 01:29:58.526587 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pjjj8" Dec 10 01:29:58 crc kubenswrapper[4884]: I1210 01:29:58.527403 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pjjj8" Dec 10 01:29:58 crc kubenswrapper[4884]: I1210 01:29:58.618830 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pjjj8" Dec 10 01:29:59 crc kubenswrapper[4884]: I1210 01:29:59.040086 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pjjj8" Dec 10 01:29:59 crc kubenswrapper[4884]: I1210 01:29:59.101838 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pjjj8"] Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.166371 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965"] Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.168106 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.174618 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.175304 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.186962 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965"] Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.257710 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqfdl\" (UniqueName: \"kubernetes.io/projected/80b98453-4751-4c95-ac21-e698601a236a-kube-api-access-hqfdl\") pod \"collect-profiles-29422170-xp965\" (UID: \"80b98453-4751-4c95-ac21-e698601a236a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.257912 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/80b98453-4751-4c95-ac21-e698601a236a-config-volume\") pod \"collect-profiles-29422170-xp965\" (UID: \"80b98453-4751-4c95-ac21-e698601a236a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.257982 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/80b98453-4751-4c95-ac21-e698601a236a-secret-volume\") pod \"collect-profiles-29422170-xp965\" (UID: \"80b98453-4751-4c95-ac21-e698601a236a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:00 
crc kubenswrapper[4884]: I1210 01:30:00.359067 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqfdl\" (UniqueName: \"kubernetes.io/projected/80b98453-4751-4c95-ac21-e698601a236a-kube-api-access-hqfdl\") pod \"collect-profiles-29422170-xp965\" (UID: \"80b98453-4751-4c95-ac21-e698601a236a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.359214 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/80b98453-4751-4c95-ac21-e698601a236a-config-volume\") pod \"collect-profiles-29422170-xp965\" (UID: \"80b98453-4751-4c95-ac21-e698601a236a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.359251 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/80b98453-4751-4c95-ac21-e698601a236a-secret-volume\") pod \"collect-profiles-29422170-xp965\" (UID: \"80b98453-4751-4c95-ac21-e698601a236a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.360132 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/80b98453-4751-4c95-ac21-e698601a236a-config-volume\") pod \"collect-profiles-29422170-xp965\" (UID: \"80b98453-4751-4c95-ac21-e698601a236a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.368673 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/80b98453-4751-4c95-ac21-e698601a236a-secret-volume\") pod \"collect-profiles-29422170-xp965\" (UID: \"80b98453-4751-4c95-ac21-e698601a236a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.395147 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqfdl\" (UniqueName: \"kubernetes.io/projected/80b98453-4751-4c95-ac21-e698601a236a-kube-api-access-hqfdl\") pod \"collect-profiles-29422170-xp965\" (UID: \"80b98453-4751-4c95-ac21-e698601a236a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:00 crc kubenswrapper[4884]: I1210 01:30:00.493876 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:01 crc kubenswrapper[4884]: I1210 01:30:01.018005 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pjjj8" podUID="5f44067d-d377-4166-a60e-ca7a33268462" containerName="registry-server" containerID="cri-o://8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564" gracePeriod=2 Dec 10 01:30:01 crc kubenswrapper[4884]: I1210 01:30:01.041296 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965"] Dec 10 01:30:01 crc kubenswrapper[4884]: E1210 01:30:01.297794 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:30:01 crc kubenswrapper[4884]: I1210 01:30:01.652179 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pjjj8" Dec 10 01:30:01 crc kubenswrapper[4884]: I1210 01:30:01.792768 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f44067d-d377-4166-a60e-ca7a33268462-catalog-content\") pod \"5f44067d-d377-4166-a60e-ca7a33268462\" (UID: \"5f44067d-d377-4166-a60e-ca7a33268462\") " Dec 10 01:30:01 crc kubenswrapper[4884]: I1210 01:30:01.793913 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f44067d-d377-4166-a60e-ca7a33268462-utilities" (OuterVolumeSpecName: "utilities") pod "5f44067d-d377-4166-a60e-ca7a33268462" (UID: "5f44067d-d377-4166-a60e-ca7a33268462"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:30:01 crc kubenswrapper[4884]: I1210 01:30:01.792870 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f44067d-d377-4166-a60e-ca7a33268462-utilities\") pod \"5f44067d-d377-4166-a60e-ca7a33268462\" (UID: \"5f44067d-d377-4166-a60e-ca7a33268462\") " Dec 10 01:30:01 crc kubenswrapper[4884]: I1210 01:30:01.795811 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vdxft\" (UniqueName: \"kubernetes.io/projected/5f44067d-d377-4166-a60e-ca7a33268462-kube-api-access-vdxft\") pod \"5f44067d-d377-4166-a60e-ca7a33268462\" (UID: \"5f44067d-d377-4166-a60e-ca7a33268462\") " Dec 10 01:30:01 crc kubenswrapper[4884]: I1210 01:30:01.796753 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f44067d-d377-4166-a60e-ca7a33268462-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:30:01 crc kubenswrapper[4884]: I1210 01:30:01.803029 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f44067d-d377-4166-a60e-ca7a33268462-kube-api-access-vdxft" (OuterVolumeSpecName: "kube-api-access-vdxft") pod "5f44067d-d377-4166-a60e-ca7a33268462" (UID: "5f44067d-d377-4166-a60e-ca7a33268462"). InnerVolumeSpecName "kube-api-access-vdxft". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:30:01 crc kubenswrapper[4884]: I1210 01:30:01.845762 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f44067d-d377-4166-a60e-ca7a33268462-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5f44067d-d377-4166-a60e-ca7a33268462" (UID: "5f44067d-d377-4166-a60e-ca7a33268462"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:30:01 crc kubenswrapper[4884]: I1210 01:30:01.898796 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f44067d-d377-4166-a60e-ca7a33268462-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:30:01 crc kubenswrapper[4884]: I1210 01:30:01.898856 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vdxft\" (UniqueName: \"kubernetes.io/projected/5f44067d-d377-4166-a60e-ca7a33268462-kube-api-access-vdxft\") on node \"crc\" DevicePath \"\"" Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.032649 4884 generic.go:334] "Generic (PLEG): container finished" podID="5f44067d-d377-4166-a60e-ca7a33268462" containerID="8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564" exitCode=0 Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.032692 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pjjj8" Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.032698 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pjjj8" event={"ID":"5f44067d-d377-4166-a60e-ca7a33268462","Type":"ContainerDied","Data":"8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564"} Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.032744 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pjjj8" event={"ID":"5f44067d-d377-4166-a60e-ca7a33268462","Type":"ContainerDied","Data":"d4ea23c3a2899d04d79251a129987e05ba0e8918e93e4c54833d1719ca790e57"} Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.032773 4884 scope.go:117] "RemoveContainer" containerID="8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564" Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.035456 4884 generic.go:334] "Generic (PLEG): container finished" podID="80b98453-4751-4c95-ac21-e698601a236a" containerID="43df4ef50fa629422d99eed327632d387ed1fce949f79f6bed1b740ca4a1d526" exitCode=0 Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.035499 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" event={"ID":"80b98453-4751-4c95-ac21-e698601a236a","Type":"ContainerDied","Data":"43df4ef50fa629422d99eed327632d387ed1fce949f79f6bed1b740ca4a1d526"} Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.035532 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" event={"ID":"80b98453-4751-4c95-ac21-e698601a236a","Type":"ContainerStarted","Data":"3f593e7b0cf5df72d8c763eec2062ce14a17b4c725c0a0d5bbddaf6cc4772d54"} Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.081066 4884 scope.go:117] "RemoveContainer" containerID="a76634a9d266b5939bb268b4c0e1335e3be1760802c8de5c00a61d2307ad1bdb" Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.090874 4884 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-marketplace/community-operators-pjjj8"] Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.109078 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pjjj8"] Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.128902 4884 scope.go:117] "RemoveContainer" containerID="bc0a9b73e5e176265119fcf461e49aabda2a39afe1c3b80e909fb67d9a8c9b43" Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.153854 4884 scope.go:117] "RemoveContainer" containerID="8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564" Dec 10 01:30:02 crc kubenswrapper[4884]: E1210 01:30:02.154370 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564\": container with ID starting with 8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564 not found: ID does not exist" containerID="8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564" Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.154680 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564"} err="failed to get container status \"8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564\": rpc error: code = NotFound desc = could not find container \"8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564\": container with ID starting with 8dd755000acb1862b2da059906a4e63fbbc90e34221db179ec75afafb7086564 not found: ID does not exist" Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.154816 4884 scope.go:117] "RemoveContainer" containerID="a76634a9d266b5939bb268b4c0e1335e3be1760802c8de5c00a61d2307ad1bdb" Dec 10 01:30:02 crc kubenswrapper[4884]: E1210 01:30:02.155459 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a76634a9d266b5939bb268b4c0e1335e3be1760802c8de5c00a61d2307ad1bdb\": container with ID starting with a76634a9d266b5939bb268b4c0e1335e3be1760802c8de5c00a61d2307ad1bdb not found: ID does not exist" containerID="a76634a9d266b5939bb268b4c0e1335e3be1760802c8de5c00a61d2307ad1bdb" Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.155569 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a76634a9d266b5939bb268b4c0e1335e3be1760802c8de5c00a61d2307ad1bdb"} err="failed to get container status \"a76634a9d266b5939bb268b4c0e1335e3be1760802c8de5c00a61d2307ad1bdb\": rpc error: code = NotFound desc = could not find container \"a76634a9d266b5939bb268b4c0e1335e3be1760802c8de5c00a61d2307ad1bdb\": container with ID starting with a76634a9d266b5939bb268b4c0e1335e3be1760802c8de5c00a61d2307ad1bdb not found: ID does not exist" Dec 10 01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.155650 4884 scope.go:117] "RemoveContainer" containerID="bc0a9b73e5e176265119fcf461e49aabda2a39afe1c3b80e909fb67d9a8c9b43" Dec 10 01:30:02 crc kubenswrapper[4884]: E1210 01:30:02.156016 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc0a9b73e5e176265119fcf461e49aabda2a39afe1c3b80e909fb67d9a8c9b43\": container with ID starting with bc0a9b73e5e176265119fcf461e49aabda2a39afe1c3b80e909fb67d9a8c9b43 not found: ID does not exist" containerID="bc0a9b73e5e176265119fcf461e49aabda2a39afe1c3b80e909fb67d9a8c9b43" Dec 10 
01:30:02 crc kubenswrapper[4884]: I1210 01:30:02.156114 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc0a9b73e5e176265119fcf461e49aabda2a39afe1c3b80e909fb67d9a8c9b43"} err="failed to get container status \"bc0a9b73e5e176265119fcf461e49aabda2a39afe1c3b80e909fb67d9a8c9b43\": rpc error: code = NotFound desc = could not find container \"bc0a9b73e5e176265119fcf461e49aabda2a39afe1c3b80e909fb67d9a8c9b43\": container with ID starting with bc0a9b73e5e176265119fcf461e49aabda2a39afe1c3b80e909fb67d9a8c9b43 not found: ID does not exist" Dec 10 01:30:03 crc kubenswrapper[4884]: I1210 01:30:03.304462 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f44067d-d377-4166-a60e-ca7a33268462" path="/var/lib/kubelet/pods/5f44067d-d377-4166-a60e-ca7a33268462/volumes" Dec 10 01:30:03 crc kubenswrapper[4884]: I1210 01:30:03.504668 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:03 crc kubenswrapper[4884]: I1210 01:30:03.534886 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/80b98453-4751-4c95-ac21-e698601a236a-config-volume\") pod \"80b98453-4751-4c95-ac21-e698601a236a\" (UID: \"80b98453-4751-4c95-ac21-e698601a236a\") " Dec 10 01:30:03 crc kubenswrapper[4884]: I1210 01:30:03.534961 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqfdl\" (UniqueName: \"kubernetes.io/projected/80b98453-4751-4c95-ac21-e698601a236a-kube-api-access-hqfdl\") pod \"80b98453-4751-4c95-ac21-e698601a236a\" (UID: \"80b98453-4751-4c95-ac21-e698601a236a\") " Dec 10 01:30:03 crc kubenswrapper[4884]: I1210 01:30:03.535123 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/80b98453-4751-4c95-ac21-e698601a236a-secret-volume\") pod \"80b98453-4751-4c95-ac21-e698601a236a\" (UID: \"80b98453-4751-4c95-ac21-e698601a236a\") " Dec 10 01:30:03 crc kubenswrapper[4884]: I1210 01:30:03.535771 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80b98453-4751-4c95-ac21-e698601a236a-config-volume" (OuterVolumeSpecName: "config-volume") pod "80b98453-4751-4c95-ac21-e698601a236a" (UID: "80b98453-4751-4c95-ac21-e698601a236a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 01:30:03 crc kubenswrapper[4884]: I1210 01:30:03.540928 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80b98453-4751-4c95-ac21-e698601a236a-kube-api-access-hqfdl" (OuterVolumeSpecName: "kube-api-access-hqfdl") pod "80b98453-4751-4c95-ac21-e698601a236a" (UID: "80b98453-4751-4c95-ac21-e698601a236a"). InnerVolumeSpecName "kube-api-access-hqfdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:30:03 crc kubenswrapper[4884]: I1210 01:30:03.554603 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/80b98453-4751-4c95-ac21-e698601a236a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "80b98453-4751-4c95-ac21-e698601a236a" (UID: "80b98453-4751-4c95-ac21-e698601a236a"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:30:03 crc kubenswrapper[4884]: I1210 01:30:03.647471 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/80b98453-4751-4c95-ac21-e698601a236a-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 01:30:03 crc kubenswrapper[4884]: I1210 01:30:03.647818 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqfdl\" (UniqueName: \"kubernetes.io/projected/80b98453-4751-4c95-ac21-e698601a236a-kube-api-access-hqfdl\") on node \"crc\" DevicePath \"\"" Dec 10 01:30:03 crc kubenswrapper[4884]: I1210 01:30:03.647834 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/80b98453-4751-4c95-ac21-e698601a236a-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 01:30:04 crc kubenswrapper[4884]: I1210 01:30:04.065177 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" event={"ID":"80b98453-4751-4c95-ac21-e698601a236a","Type":"ContainerDied","Data":"3f593e7b0cf5df72d8c763eec2062ce14a17b4c725c0a0d5bbddaf6cc4772d54"} Dec 10 01:30:04 crc kubenswrapper[4884]: I1210 01:30:04.065235 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f593e7b0cf5df72d8c763eec2062ce14a17b4c725c0a0d5bbddaf6cc4772d54" Dec 10 01:30:04 crc kubenswrapper[4884]: I1210 01:30:04.065283 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422170-xp965" Dec 10 01:30:04 crc kubenswrapper[4884]: I1210 01:30:04.614995 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw"] Dec 10 01:30:04 crc kubenswrapper[4884]: I1210 01:30:04.626517 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422125-dqxhw"] Dec 10 01:30:05 crc kubenswrapper[4884]: E1210 01:30:05.290471 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:30:05 crc kubenswrapper[4884]: I1210 01:30:05.305819 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ba8bede-46ca-4caf-be35-617a7c91ef20" path="/var/lib/kubelet/pods/7ba8bede-46ca-4caf-be35-617a7c91ef20/volumes" Dec 10 01:30:13 crc kubenswrapper[4884]: E1210 01:30:13.304230 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:30:18 crc kubenswrapper[4884]: I1210 01:30:18.098067 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:30:18 crc kubenswrapper[4884]: I1210 01:30:18.099087 4884 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:30:18 crc kubenswrapper[4884]: E1210 01:30:18.291565 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:30:24 crc kubenswrapper[4884]: E1210 01:30:24.296483 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:30:32 crc kubenswrapper[4884]: I1210 01:30:32.291895 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 01:30:32 crc kubenswrapper[4884]: E1210 01:30:32.422819 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:30:32 crc kubenswrapper[4884]: E1210 01:30:32.422900 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:30:32 crc kubenswrapper[4884]: E1210 01:30:32.423058 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:30:32 crc kubenswrapper[4884]: E1210 01:30:32.424318 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:30:38 crc kubenswrapper[4884]: E1210 01:30:38.423072 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:30:38 crc kubenswrapper[4884]: E1210 01:30:38.423706 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:30:38 crc kubenswrapper[4884]: E1210 01:30:38.423893 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:30:38 crc kubenswrapper[4884]: E1210 01:30:38.425114 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:30:42 crc kubenswrapper[4884]: I1210 01:30:42.575773 4884 scope.go:117] "RemoveContainer" containerID="5e4973a2393c8c1f289c722babb93f4ff22ce7feeb7d2b6fcce909afc097f39a" Dec 10 01:30:47 crc kubenswrapper[4884]: E1210 01:30:47.303099 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:30:48 crc kubenswrapper[4884]: I1210 01:30:48.099008 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:30:48 crc kubenswrapper[4884]: I1210 01:30:48.099093 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:30:48 crc kubenswrapper[4884]: I1210 01:30:48.099154 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 01:30:48 crc kubenswrapper[4884]: I1210 01:30:48.100016 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 01:30:48 crc kubenswrapper[4884]: I1210 01:30:48.100121 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" gracePeriod=600 Dec 10 01:30:48 crc kubenswrapper[4884]: E1210 01:30:48.224094 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:30:48 crc kubenswrapper[4884]: I1210 01:30:48.670098 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" exitCode=0 Dec 10 01:30:48 crc kubenswrapper[4884]: I1210 01:30:48.670159 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f"} Dec 10 01:30:48 crc kubenswrapper[4884]: I1210 01:30:48.670582 4884 scope.go:117] "RemoveContainer" containerID="1e68ee0e36c463fd39e73cdb3752be5c9fb329079efa100c1ea047c36f426cba" Dec 10 01:30:48 crc kubenswrapper[4884]: I1210 01:30:48.672182 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:30:48 crc kubenswrapper[4884]: E1210 01:30:48.673083 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:30:50 crc kubenswrapper[4884]: E1210 01:30:50.290562 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:31:00 crc kubenswrapper[4884]: I1210 01:31:00.287165 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:31:00 crc kubenswrapper[4884]: E1210 01:31:00.288372 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:31:01 crc kubenswrapper[4884]: E1210 01:31:01.293706 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:31:02 crc kubenswrapper[4884]: E1210 01:31:02.289577 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:31:13 crc kubenswrapper[4884]: E1210 01:31:13.289127 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:31:14 crc kubenswrapper[4884]: E1210 01:31:14.290234 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:31:15 crc kubenswrapper[4884]: I1210 01:31:15.288675 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:31:15 crc kubenswrapper[4884]: E1210 01:31:15.289018 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:31:25 crc kubenswrapper[4884]: E1210 01:31:25.290710 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:31:25 crc kubenswrapper[4884]: E1210 01:31:25.291597 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:31:27 crc kubenswrapper[4884]: I1210 01:31:27.299932 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:31:27 crc kubenswrapper[4884]: E1210 01:31:27.300834 4884 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:31:36 crc kubenswrapper[4884]: E1210 01:31:36.290989 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:31:39 crc kubenswrapper[4884]: E1210 01:31:39.292528 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:31:40 crc kubenswrapper[4884]: I1210 01:31:40.287480 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:31:40 crc kubenswrapper[4884]: E1210 01:31:40.287950 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:31:50 crc kubenswrapper[4884]: E1210 01:31:50.295015 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:31:50 crc kubenswrapper[4884]: E1210 01:31:50.295065 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:31:51 crc kubenswrapper[4884]: I1210 01:31:51.287274 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:31:51 crc kubenswrapper[4884]: E1210 01:31:51.288127 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:32:04 crc kubenswrapper[4884]: I1210 01:32:04.288216 4884 scope.go:117] "RemoveContainer" 
containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:32:04 crc kubenswrapper[4884]: E1210 01:32:04.289297 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:32:04 crc kubenswrapper[4884]: E1210 01:32:04.290923 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:32:04 crc kubenswrapper[4884]: E1210 01:32:04.291101 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:32:16 crc kubenswrapper[4884]: I1210 01:32:16.288910 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:32:16 crc kubenswrapper[4884]: E1210 01:32:16.290117 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:32:16 crc kubenswrapper[4884]: E1210 01:32:16.291235 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:32:19 crc kubenswrapper[4884]: E1210 01:32:19.289515 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:32:29 crc kubenswrapper[4884]: I1210 01:32:29.288040 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:32:29 crc kubenswrapper[4884]: E1210 01:32:29.291167 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:32:30 crc 
kubenswrapper[4884]: E1210 01:32:30.292961 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:32:31 crc kubenswrapper[4884]: E1210 01:32:31.290117 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:32:42 crc kubenswrapper[4884]: I1210 01:32:42.287677 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:32:42 crc kubenswrapper[4884]: E1210 01:32:42.288802 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:32:42 crc kubenswrapper[4884]: E1210 01:32:42.293348 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:32:42 crc kubenswrapper[4884]: I1210 01:32:42.756080 4884 scope.go:117] "RemoveContainer" containerID="107d6a5c6c3aeb03a2d4717992150ae7a2d408a5e350968a610ce6b7596e9240" Dec 10 01:32:42 crc kubenswrapper[4884]: I1210 01:32:42.788858 4884 scope.go:117] "RemoveContainer" containerID="11486516eb623a83a8790e3b1690095e402b2afbd5bdc7584bb2dfc60085ef0f" Dec 10 01:32:42 crc kubenswrapper[4884]: I1210 01:32:42.867811 4884 scope.go:117] "RemoveContainer" containerID="95df6d945b891aaa6f7368898ad66b0cead8f62716cdee53c3b6996d3bc76700" Dec 10 01:32:45 crc kubenswrapper[4884]: E1210 01:32:45.295541 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:32:54 crc kubenswrapper[4884]: I1210 01:32:54.288031 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:32:54 crc kubenswrapper[4884]: E1210 01:32:54.289217 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:32:56 crc kubenswrapper[4884]: E1210 01:32:56.290578 4884 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:32:57 crc kubenswrapper[4884]: E1210 01:32:57.306892 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:33:05 crc kubenswrapper[4884]: I1210 01:33:05.287681 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:33:05 crc kubenswrapper[4884]: E1210 01:33:05.288527 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:33:07 crc kubenswrapper[4884]: E1210 01:33:07.298177 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:33:10 crc kubenswrapper[4884]: E1210 01:33:10.290162 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:33:18 crc kubenswrapper[4884]: I1210 01:33:18.287738 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:33:18 crc kubenswrapper[4884]: E1210 01:33:18.288790 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:33:18 crc kubenswrapper[4884]: E1210 01:33:18.289188 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:33:22 crc kubenswrapper[4884]: E1210 01:33:22.289099 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.140794 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mqw4m"] Dec 10 01:33:25 crc kubenswrapper[4884]: E1210 01:33:25.141560 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f44067d-d377-4166-a60e-ca7a33268462" containerName="registry-server" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.141576 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f44067d-d377-4166-a60e-ca7a33268462" containerName="registry-server" Dec 10 01:33:25 crc kubenswrapper[4884]: E1210 01:33:25.141624 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80b98453-4751-4c95-ac21-e698601a236a" containerName="collect-profiles" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.141632 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="80b98453-4751-4c95-ac21-e698601a236a" containerName="collect-profiles" Dec 10 01:33:25 crc kubenswrapper[4884]: E1210 01:33:25.141652 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f44067d-d377-4166-a60e-ca7a33268462" containerName="extract-content" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.141681 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f44067d-d377-4166-a60e-ca7a33268462" containerName="extract-content" Dec 10 01:33:25 crc kubenswrapper[4884]: E1210 01:33:25.141700 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f44067d-d377-4166-a60e-ca7a33268462" containerName="extract-utilities" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.141708 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f44067d-d377-4166-a60e-ca7a33268462" containerName="extract-utilities" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.141944 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="80b98453-4751-4c95-ac21-e698601a236a" containerName="collect-profiles" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.141980 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f44067d-d377-4166-a60e-ca7a33268462" containerName="registry-server" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.144030 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.162177 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mqw4m"] Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.265860 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93b200ef-5570-4645-b44e-d9ae86b5feba-utilities\") pod \"redhat-marketplace-mqw4m\" (UID: \"93b200ef-5570-4645-b44e-d9ae86b5feba\") " pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.266272 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93b200ef-5570-4645-b44e-d9ae86b5feba-catalog-content\") pod \"redhat-marketplace-mqw4m\" (UID: \"93b200ef-5570-4645-b44e-d9ae86b5feba\") " pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.266602 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lk6jj\" (UniqueName: \"kubernetes.io/projected/93b200ef-5570-4645-b44e-d9ae86b5feba-kube-api-access-lk6jj\") pod \"redhat-marketplace-mqw4m\" (UID: \"93b200ef-5570-4645-b44e-d9ae86b5feba\") " pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.368506 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lk6jj\" (UniqueName: \"kubernetes.io/projected/93b200ef-5570-4645-b44e-d9ae86b5feba-kube-api-access-lk6jj\") pod \"redhat-marketplace-mqw4m\" (UID: \"93b200ef-5570-4645-b44e-d9ae86b5feba\") " pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.368652 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93b200ef-5570-4645-b44e-d9ae86b5feba-utilities\") pod \"redhat-marketplace-mqw4m\" (UID: \"93b200ef-5570-4645-b44e-d9ae86b5feba\") " pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.368675 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93b200ef-5570-4645-b44e-d9ae86b5feba-catalog-content\") pod \"redhat-marketplace-mqw4m\" (UID: \"93b200ef-5570-4645-b44e-d9ae86b5feba\") " pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.369292 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93b200ef-5570-4645-b44e-d9ae86b5feba-catalog-content\") pod \"redhat-marketplace-mqw4m\" (UID: \"93b200ef-5570-4645-b44e-d9ae86b5feba\") " pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.370159 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93b200ef-5570-4645-b44e-d9ae86b5feba-utilities\") pod \"redhat-marketplace-mqw4m\" (UID: \"93b200ef-5570-4645-b44e-d9ae86b5feba\") " pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.388262 4884 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-lk6jj\" (UniqueName: \"kubernetes.io/projected/93b200ef-5570-4645-b44e-d9ae86b5feba-kube-api-access-lk6jj\") pod \"redhat-marketplace-mqw4m\" (UID: \"93b200ef-5570-4645-b44e-d9ae86b5feba\") " pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:25 crc kubenswrapper[4884]: I1210 01:33:25.473900 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:26 crc kubenswrapper[4884]: I1210 01:33:26.003526 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mqw4m"] Dec 10 01:33:26 crc kubenswrapper[4884]: I1210 01:33:26.758759 4884 generic.go:334] "Generic (PLEG): container finished" podID="93b200ef-5570-4645-b44e-d9ae86b5feba" containerID="248a8299d59fd532f1bafd13a731ce2c8c105c3a5f6e3a274c9f13a8802707c4" exitCode=0 Dec 10 01:33:26 crc kubenswrapper[4884]: I1210 01:33:26.758824 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mqw4m" event={"ID":"93b200ef-5570-4645-b44e-d9ae86b5feba","Type":"ContainerDied","Data":"248a8299d59fd532f1bafd13a731ce2c8c105c3a5f6e3a274c9f13a8802707c4"} Dec 10 01:33:26 crc kubenswrapper[4884]: I1210 01:33:26.759137 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mqw4m" event={"ID":"93b200ef-5570-4645-b44e-d9ae86b5feba","Type":"ContainerStarted","Data":"b7371fb30cfa7ec661f6c11d15fbd7875060b1a4ecf263036a5d8cdad8613005"} Dec 10 01:33:27 crc kubenswrapper[4884]: I1210 01:33:27.772009 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mqw4m" event={"ID":"93b200ef-5570-4645-b44e-d9ae86b5feba","Type":"ContainerStarted","Data":"e43012ea26a433e9c85a92db6f3d4bcaeae32ff3dced31193841a3d3ce617f3d"} Dec 10 01:33:28 crc kubenswrapper[4884]: I1210 01:33:28.787258 4884 generic.go:334] "Generic (PLEG): container finished" podID="93b200ef-5570-4645-b44e-d9ae86b5feba" containerID="e43012ea26a433e9c85a92db6f3d4bcaeae32ff3dced31193841a3d3ce617f3d" exitCode=0 Dec 10 01:33:28 crc kubenswrapper[4884]: I1210 01:33:28.787480 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mqw4m" event={"ID":"93b200ef-5570-4645-b44e-d9ae86b5feba","Type":"ContainerDied","Data":"e43012ea26a433e9c85a92db6f3d4bcaeae32ff3dced31193841a3d3ce617f3d"} Dec 10 01:33:29 crc kubenswrapper[4884]: I1210 01:33:29.803126 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mqw4m" event={"ID":"93b200ef-5570-4645-b44e-d9ae86b5feba","Type":"ContainerStarted","Data":"358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c"} Dec 10 01:33:29 crc kubenswrapper[4884]: I1210 01:33:29.831366 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mqw4m" podStartSLOduration=2.352431293 podStartE2EDuration="4.831345737s" podCreationTimestamp="2025-12-10 01:33:25 +0000 UTC" firstStartedPulling="2025-12-10 01:33:26.7605908 +0000 UTC m=+3779.838547927" lastFinishedPulling="2025-12-10 01:33:29.239505244 +0000 UTC m=+3782.317462371" observedRunningTime="2025-12-10 01:33:29.819864938 +0000 UTC m=+3782.897822085" watchObservedRunningTime="2025-12-10 01:33:29.831345737 +0000 UTC m=+3782.909302864" Dec 10 01:33:31 crc kubenswrapper[4884]: E1210 01:33:31.293515 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:33:33 crc kubenswrapper[4884]: I1210 01:33:33.287478 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:33:33 crc kubenswrapper[4884]: E1210 01:33:33.288210 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:33:33 crc kubenswrapper[4884]: E1210 01:33:33.290920 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:33:35 crc kubenswrapper[4884]: I1210 01:33:35.474648 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:35 crc kubenswrapper[4884]: I1210 01:33:35.475000 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:35 crc kubenswrapper[4884]: I1210 01:33:35.550266 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:35 crc kubenswrapper[4884]: I1210 01:33:35.930840 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:36 crc kubenswrapper[4884]: I1210 01:33:36.003859 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mqw4m"] Dec 10 01:33:37 crc kubenswrapper[4884]: I1210 01:33:37.906731 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mqw4m" podUID="93b200ef-5570-4645-b44e-d9ae86b5feba" containerName="registry-server" containerID="cri-o://358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c" gracePeriod=2 Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.508310 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.694505 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93b200ef-5570-4645-b44e-d9ae86b5feba-catalog-content\") pod \"93b200ef-5570-4645-b44e-d9ae86b5feba\" (UID: \"93b200ef-5570-4645-b44e-d9ae86b5feba\") " Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.694868 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lk6jj\" (UniqueName: \"kubernetes.io/projected/93b200ef-5570-4645-b44e-d9ae86b5feba-kube-api-access-lk6jj\") pod \"93b200ef-5570-4645-b44e-d9ae86b5feba\" (UID: \"93b200ef-5570-4645-b44e-d9ae86b5feba\") " Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.695012 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93b200ef-5570-4645-b44e-d9ae86b5feba-utilities\") pod \"93b200ef-5570-4645-b44e-d9ae86b5feba\" (UID: \"93b200ef-5570-4645-b44e-d9ae86b5feba\") " Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.696159 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93b200ef-5570-4645-b44e-d9ae86b5feba-utilities" (OuterVolumeSpecName: "utilities") pod "93b200ef-5570-4645-b44e-d9ae86b5feba" (UID: "93b200ef-5570-4645-b44e-d9ae86b5feba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.700370 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93b200ef-5570-4645-b44e-d9ae86b5feba-kube-api-access-lk6jj" (OuterVolumeSpecName: "kube-api-access-lk6jj") pod "93b200ef-5570-4645-b44e-d9ae86b5feba" (UID: "93b200ef-5570-4645-b44e-d9ae86b5feba"). InnerVolumeSpecName "kube-api-access-lk6jj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.714742 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93b200ef-5570-4645-b44e-d9ae86b5feba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93b200ef-5570-4645-b44e-d9ae86b5feba" (UID: "93b200ef-5570-4645-b44e-d9ae86b5feba"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.798177 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93b200ef-5570-4645-b44e-d9ae86b5feba-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.798225 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93b200ef-5570-4645-b44e-d9ae86b5feba-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.798245 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lk6jj\" (UniqueName: \"kubernetes.io/projected/93b200ef-5570-4645-b44e-d9ae86b5feba-kube-api-access-lk6jj\") on node \"crc\" DevicePath \"\"" Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.922599 4884 generic.go:334] "Generic (PLEG): container finished" podID="93b200ef-5570-4645-b44e-d9ae86b5feba" containerID="358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c" exitCode=0 Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.922653 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mqw4m" event={"ID":"93b200ef-5570-4645-b44e-d9ae86b5feba","Type":"ContainerDied","Data":"358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c"} Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.922695 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mqw4m" Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.922716 4884 scope.go:117] "RemoveContainer" containerID="358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c" Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.922702 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mqw4m" event={"ID":"93b200ef-5570-4645-b44e-d9ae86b5feba","Type":"ContainerDied","Data":"b7371fb30cfa7ec661f6c11d15fbd7875060b1a4ecf263036a5d8cdad8613005"} Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.967408 4884 scope.go:117] "RemoveContainer" containerID="e43012ea26a433e9c85a92db6f3d4bcaeae32ff3dced31193841a3d3ce617f3d" Dec 10 01:33:38 crc kubenswrapper[4884]: I1210 01:33:38.989080 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mqw4m"] Dec 10 01:33:39 crc kubenswrapper[4884]: I1210 01:33:39.005877 4884 scope.go:117] "RemoveContainer" containerID="248a8299d59fd532f1bafd13a731ce2c8c105c3a5f6e3a274c9f13a8802707c4" Dec 10 01:33:39 crc kubenswrapper[4884]: I1210 01:33:39.009452 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mqw4m"] Dec 10 01:33:39 crc kubenswrapper[4884]: I1210 01:33:39.090050 4884 scope.go:117] "RemoveContainer" containerID="358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c" Dec 10 01:33:39 crc kubenswrapper[4884]: E1210 01:33:39.090601 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c\": container with ID starting with 358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c not found: ID does not exist" containerID="358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c" Dec 10 01:33:39 crc kubenswrapper[4884]: I1210 01:33:39.090669 4884 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c"} err="failed to get container status \"358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c\": rpc error: code = NotFound desc = could not find container \"358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c\": container with ID starting with 358bf271201a138caa72af0355bde6585c42ff1b1a1522527c5b260f5f20668c not found: ID does not exist" Dec 10 01:33:39 crc kubenswrapper[4884]: I1210 01:33:39.090719 4884 scope.go:117] "RemoveContainer" containerID="e43012ea26a433e9c85a92db6f3d4bcaeae32ff3dced31193841a3d3ce617f3d" Dec 10 01:33:39 crc kubenswrapper[4884]: E1210 01:33:39.091115 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e43012ea26a433e9c85a92db6f3d4bcaeae32ff3dced31193841a3d3ce617f3d\": container with ID starting with e43012ea26a433e9c85a92db6f3d4bcaeae32ff3dced31193841a3d3ce617f3d not found: ID does not exist" containerID="e43012ea26a433e9c85a92db6f3d4bcaeae32ff3dced31193841a3d3ce617f3d" Dec 10 01:33:39 crc kubenswrapper[4884]: I1210 01:33:39.091181 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e43012ea26a433e9c85a92db6f3d4bcaeae32ff3dced31193841a3d3ce617f3d"} err="failed to get container status \"e43012ea26a433e9c85a92db6f3d4bcaeae32ff3dced31193841a3d3ce617f3d\": rpc error: code = NotFound desc = could not find container \"e43012ea26a433e9c85a92db6f3d4bcaeae32ff3dced31193841a3d3ce617f3d\": container with ID starting with e43012ea26a433e9c85a92db6f3d4bcaeae32ff3dced31193841a3d3ce617f3d not found: ID does not exist" Dec 10 01:33:39 crc kubenswrapper[4884]: I1210 01:33:39.091223 4884 scope.go:117] "RemoveContainer" containerID="248a8299d59fd532f1bafd13a731ce2c8c105c3a5f6e3a274c9f13a8802707c4" Dec 10 01:33:39 crc kubenswrapper[4884]: E1210 01:33:39.091835 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"248a8299d59fd532f1bafd13a731ce2c8c105c3a5f6e3a274c9f13a8802707c4\": container with ID starting with 248a8299d59fd532f1bafd13a731ce2c8c105c3a5f6e3a274c9f13a8802707c4 not found: ID does not exist" containerID="248a8299d59fd532f1bafd13a731ce2c8c105c3a5f6e3a274c9f13a8802707c4" Dec 10 01:33:39 crc kubenswrapper[4884]: I1210 01:33:39.091902 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"248a8299d59fd532f1bafd13a731ce2c8c105c3a5f6e3a274c9f13a8802707c4"} err="failed to get container status \"248a8299d59fd532f1bafd13a731ce2c8c105c3a5f6e3a274c9f13a8802707c4\": rpc error: code = NotFound desc = could not find container \"248a8299d59fd532f1bafd13a731ce2c8c105c3a5f6e3a274c9f13a8802707c4\": container with ID starting with 248a8299d59fd532f1bafd13a731ce2c8c105c3a5f6e3a274c9f13a8802707c4 not found: ID does not exist" Dec 10 01:33:39 crc kubenswrapper[4884]: I1210 01:33:39.305569 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93b200ef-5570-4645-b44e-d9ae86b5feba" path="/var/lib/kubelet/pods/93b200ef-5570-4645-b44e-d9ae86b5feba/volumes" Dec 10 01:33:45 crc kubenswrapper[4884]: E1210 01:33:45.298751 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:33:47 crc kubenswrapper[4884]: I1210 01:33:47.302305 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:33:47 crc kubenswrapper[4884]: E1210 01:33:47.303155 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:33:48 crc kubenswrapper[4884]: E1210 01:33:48.289370 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:34:00 crc kubenswrapper[4884]: E1210 01:34:00.289792 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:34:01 crc kubenswrapper[4884]: I1210 01:34:01.287654 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:34:01 crc kubenswrapper[4884]: E1210 01:34:01.288296 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:34:01 crc kubenswrapper[4884]: E1210 01:34:01.291216 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:34:12 crc kubenswrapper[4884]: I1210 01:34:12.288349 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:34:12 crc kubenswrapper[4884]: E1210 01:34:12.289334 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:34:12 crc kubenswrapper[4884]: E1210 01:34:12.291861 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:34:16 crc kubenswrapper[4884]: E1210 01:34:16.292338 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:34:23 crc kubenswrapper[4884]: E1210 01:34:23.290224 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:34:27 crc kubenswrapper[4884]: I1210 01:34:27.303483 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:34:27 crc kubenswrapper[4884]: E1210 01:34:27.304090 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:34:28 crc kubenswrapper[4884]: E1210 01:34:28.290195 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:34:36 crc kubenswrapper[4884]: E1210 01:34:36.293568 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:34:38 crc kubenswrapper[4884]: I1210 01:34:38.287909 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:34:38 crc kubenswrapper[4884]: E1210 01:34:38.289069 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:34:39 crc kubenswrapper[4884]: E1210 01:34:39.291745 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.076379 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p"] Dec 10 01:34:45 crc kubenswrapper[4884]: E1210 01:34:45.078051 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93b200ef-5570-4645-b44e-d9ae86b5feba" containerName="extract-content" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.078084 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93b200ef-5570-4645-b44e-d9ae86b5feba" containerName="extract-content" Dec 10 01:34:45 crc kubenswrapper[4884]: E1210 01:34:45.078121 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93b200ef-5570-4645-b44e-d9ae86b5feba" containerName="registry-server" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.078139 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93b200ef-5570-4645-b44e-d9ae86b5feba" containerName="registry-server" Dec 10 01:34:45 crc kubenswrapper[4884]: E1210 01:34:45.078211 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93b200ef-5570-4645-b44e-d9ae86b5feba" containerName="extract-utilities" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.078230 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="93b200ef-5570-4645-b44e-d9ae86b5feba" containerName="extract-utilities" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.078802 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="93b200ef-5570-4645-b44e-d9ae86b5feba" containerName="registry-server" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.080102 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.083059 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-q72zm" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.083311 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.096736 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.096807 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.096737 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.106102 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p"] Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.183064 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.183128 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.183175 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.183241 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.183291 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.183318 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.183356 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkx8f\" (UniqueName: \"kubernetes.io/projected/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-kube-api-access-fkx8f\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.285450 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.285515 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.285567 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.285645 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.285699 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.285724 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.285766 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkx8f\" (UniqueName: \"kubernetes.io/projected/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-kube-api-access-fkx8f\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.292615 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.292752 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.293484 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: 
\"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.294498 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.296081 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.307595 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.312012 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkx8f\" (UniqueName: \"kubernetes.io/projected/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-kube-api-access-fkx8f\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.408425 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:34:45 crc kubenswrapper[4884]: W1210 01:34:45.965396 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43fa1b12_f3b7_4bb1_a632_f3f756e29d48.slice/crio-0ebaa737dc565468c28adf01c757efc233e32c59666d31807dcf467abebe91b8 WatchSource:0}: Error finding container 0ebaa737dc565468c28adf01c757efc233e32c59666d31807dcf467abebe91b8: Status 404 returned error can't find the container with id 0ebaa737dc565468c28adf01c757efc233e32c59666d31807dcf467abebe91b8 Dec 10 01:34:45 crc kubenswrapper[4884]: I1210 01:34:45.974328 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p"] Dec 10 01:34:46 crc kubenswrapper[4884]: I1210 01:34:46.881860 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" event={"ID":"43fa1b12-f3b7-4bb1-a632-f3f756e29d48","Type":"ContainerStarted","Data":"0ebaa737dc565468c28adf01c757efc233e32c59666d31807dcf467abebe91b8"} Dec 10 01:34:47 crc kubenswrapper[4884]: I1210 01:34:47.898403 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" event={"ID":"43fa1b12-f3b7-4bb1-a632-f3f756e29d48","Type":"ContainerStarted","Data":"10c385c9af72f665c74e089ea64e85111612040ebb47e859dea373282edba8a6"} Dec 10 01:34:47 crc kubenswrapper[4884]: I1210 01:34:47.937685 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" podStartSLOduration=2.273905734 podStartE2EDuration="2.937663282s" podCreationTimestamp="2025-12-10 01:34:45 +0000 UTC" firstStartedPulling="2025-12-10 01:34:45.967392603 +0000 UTC m=+3859.045349720" lastFinishedPulling="2025-12-10 01:34:46.631150111 +0000 UTC m=+3859.709107268" observedRunningTime="2025-12-10 01:34:47.925797762 +0000 UTC m=+3861.003754899" watchObservedRunningTime="2025-12-10 01:34:47.937663282 +0000 UTC m=+3861.015620399" Dec 10 01:34:48 crc kubenswrapper[4884]: E1210 01:34:48.289872 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:34:49 crc kubenswrapper[4884]: I1210 01:34:49.287717 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:34:49 crc kubenswrapper[4884]: E1210 01:34:49.288393 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:34:54 crc kubenswrapper[4884]: E1210 01:34:54.293774 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:35:01 crc kubenswrapper[4884]: I1210 01:35:01.287136 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:35:01 crc kubenswrapper[4884]: E1210 01:35:01.288870 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:35:01 crc kubenswrapper[4884]: E1210 01:35:01.289113 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:35:07 crc kubenswrapper[4884]: E1210 01:35:07.302004 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.026070 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b9cg4"] Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.030783 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.040808 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b9cg4"] Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.157403 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c3668cb-2285-4d10-8759-344eaefa1790-utilities\") pod \"redhat-operators-b9cg4\" (UID: \"0c3668cb-2285-4d10-8759-344eaefa1790\") " pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.157785 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c3668cb-2285-4d10-8759-344eaefa1790-catalog-content\") pod \"redhat-operators-b9cg4\" (UID: \"0c3668cb-2285-4d10-8759-344eaefa1790\") " pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.157942 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ghkt\" (UniqueName: \"kubernetes.io/projected/0c3668cb-2285-4d10-8759-344eaefa1790-kube-api-access-9ghkt\") pod \"redhat-operators-b9cg4\" (UID: \"0c3668cb-2285-4d10-8759-344eaefa1790\") " pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.260149 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c3668cb-2285-4d10-8759-344eaefa1790-utilities\") pod \"redhat-operators-b9cg4\" (UID: \"0c3668cb-2285-4d10-8759-344eaefa1790\") " pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.260268 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c3668cb-2285-4d10-8759-344eaefa1790-catalog-content\") pod \"redhat-operators-b9cg4\" (UID: \"0c3668cb-2285-4d10-8759-344eaefa1790\") " pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.260346 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ghkt\" (UniqueName: \"kubernetes.io/projected/0c3668cb-2285-4d10-8759-344eaefa1790-kube-api-access-9ghkt\") pod \"redhat-operators-b9cg4\" (UID: \"0c3668cb-2285-4d10-8759-344eaefa1790\") " pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.261387 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c3668cb-2285-4d10-8759-344eaefa1790-catalog-content\") pod \"redhat-operators-b9cg4\" (UID: \"0c3668cb-2285-4d10-8759-344eaefa1790\") " pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.261629 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c3668cb-2285-4d10-8759-344eaefa1790-utilities\") pod \"redhat-operators-b9cg4\" (UID: \"0c3668cb-2285-4d10-8759-344eaefa1790\") " pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.291734 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-9ghkt\" (UniqueName: \"kubernetes.io/projected/0c3668cb-2285-4d10-8759-344eaefa1790-kube-api-access-9ghkt\") pod \"redhat-operators-b9cg4\" (UID: \"0c3668cb-2285-4d10-8759-344eaefa1790\") " pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.354712 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:09 crc kubenswrapper[4884]: I1210 01:35:09.902301 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b9cg4"] Dec 10 01:35:10 crc kubenswrapper[4884]: I1210 01:35:10.190091 4884 generic.go:334] "Generic (PLEG): container finished" podID="0c3668cb-2285-4d10-8759-344eaefa1790" containerID="1f9c4530f583ab0f8682df45cd72a76557f8db3be5422d5e164378efdbf1d71f" exitCode=0 Dec 10 01:35:10 crc kubenswrapper[4884]: I1210 01:35:10.190208 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9cg4" event={"ID":"0c3668cb-2285-4d10-8759-344eaefa1790","Type":"ContainerDied","Data":"1f9c4530f583ab0f8682df45cd72a76557f8db3be5422d5e164378efdbf1d71f"} Dec 10 01:35:10 crc kubenswrapper[4884]: I1210 01:35:10.190410 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9cg4" event={"ID":"0c3668cb-2285-4d10-8759-344eaefa1790","Type":"ContainerStarted","Data":"1be0ad9952325972049ac0d723440fc15a193b323bfa8d138538254eaeebea03"} Dec 10 01:35:11 crc kubenswrapper[4884]: I1210 01:35:11.204137 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9cg4" event={"ID":"0c3668cb-2285-4d10-8759-344eaefa1790","Type":"ContainerStarted","Data":"9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db"} Dec 10 01:35:14 crc kubenswrapper[4884]: E1210 01:35:14.190810 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c3668cb_2285_4d10_8759_344eaefa1790.slice/crio-conmon-9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db.scope\": RecentStats: unable to find data in memory cache]" Dec 10 01:35:14 crc kubenswrapper[4884]: I1210 01:35:14.237847 4884 generic.go:334] "Generic (PLEG): container finished" podID="0c3668cb-2285-4d10-8759-344eaefa1790" containerID="9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db" exitCode=0 Dec 10 01:35:14 crc kubenswrapper[4884]: I1210 01:35:14.237892 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9cg4" event={"ID":"0c3668cb-2285-4d10-8759-344eaefa1790","Type":"ContainerDied","Data":"9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db"} Dec 10 01:35:14 crc kubenswrapper[4884]: E1210 01:35:14.289324 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:35:15 crc kubenswrapper[4884]: I1210 01:35:15.287726 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:35:15 crc kubenswrapper[4884]: E1210 01:35:15.289095 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:35:16 crc kubenswrapper[4884]: I1210 01:35:16.264015 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9cg4" event={"ID":"0c3668cb-2285-4d10-8759-344eaefa1790","Type":"ContainerStarted","Data":"d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960"} Dec 10 01:35:16 crc kubenswrapper[4884]: I1210 01:35:16.290236 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b9cg4" podStartSLOduration=3.414859819 podStartE2EDuration="8.290215337s" podCreationTimestamp="2025-12-10 01:35:08 +0000 UTC" firstStartedPulling="2025-12-10 01:35:10.191812753 +0000 UTC m=+3883.269769870" lastFinishedPulling="2025-12-10 01:35:15.067168271 +0000 UTC m=+3888.145125388" observedRunningTime="2025-12-10 01:35:16.283715101 +0000 UTC m=+3889.361672308" watchObservedRunningTime="2025-12-10 01:35:16.290215337 +0000 UTC m=+3889.368172464" Dec 10 01:35:19 crc kubenswrapper[4884]: I1210 01:35:19.355575 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:19 crc kubenswrapper[4884]: I1210 01:35:19.355840 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:20 crc kubenswrapper[4884]: I1210 01:35:20.462018 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-b9cg4" podUID="0c3668cb-2285-4d10-8759-344eaefa1790" containerName="registry-server" probeResult="failure" output=< Dec 10 01:35:20 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 01:35:20 crc kubenswrapper[4884]: > Dec 10 01:35:21 crc kubenswrapper[4884]: E1210 01:35:21.291404 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:35:26 crc kubenswrapper[4884]: I1210 01:35:26.287235 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:35:26 crc kubenswrapper[4884]: E1210 01:35:26.288508 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:35:29 crc kubenswrapper[4884]: E1210 01:35:29.292719 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" 
podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:35:29 crc kubenswrapper[4884]: I1210 01:35:29.447124 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:29 crc kubenswrapper[4884]: I1210 01:35:29.519752 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:29 crc kubenswrapper[4884]: I1210 01:35:29.713829 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b9cg4"] Dec 10 01:35:31 crc kubenswrapper[4884]: I1210 01:35:31.456642 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-b9cg4" podUID="0c3668cb-2285-4d10-8759-344eaefa1790" containerName="registry-server" containerID="cri-o://d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960" gracePeriod=2 Dec 10 01:35:31 crc kubenswrapper[4884]: I1210 01:35:31.926009 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.007838 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9ghkt\" (UniqueName: \"kubernetes.io/projected/0c3668cb-2285-4d10-8759-344eaefa1790-kube-api-access-9ghkt\") pod \"0c3668cb-2285-4d10-8759-344eaefa1790\" (UID: \"0c3668cb-2285-4d10-8759-344eaefa1790\") " Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.008052 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c3668cb-2285-4d10-8759-344eaefa1790-catalog-content\") pod \"0c3668cb-2285-4d10-8759-344eaefa1790\" (UID: \"0c3668cb-2285-4d10-8759-344eaefa1790\") " Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.008129 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c3668cb-2285-4d10-8759-344eaefa1790-utilities\") pod \"0c3668cb-2285-4d10-8759-344eaefa1790\" (UID: \"0c3668cb-2285-4d10-8759-344eaefa1790\") " Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.008857 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c3668cb-2285-4d10-8759-344eaefa1790-utilities" (OuterVolumeSpecName: "utilities") pod "0c3668cb-2285-4d10-8759-344eaefa1790" (UID: "0c3668cb-2285-4d10-8759-344eaefa1790"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.009095 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0c3668cb-2285-4d10-8759-344eaefa1790-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.013302 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c3668cb-2285-4d10-8759-344eaefa1790-kube-api-access-9ghkt" (OuterVolumeSpecName: "kube-api-access-9ghkt") pod "0c3668cb-2285-4d10-8759-344eaefa1790" (UID: "0c3668cb-2285-4d10-8759-344eaefa1790"). InnerVolumeSpecName "kube-api-access-9ghkt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.109249 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0c3668cb-2285-4d10-8759-344eaefa1790-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0c3668cb-2285-4d10-8759-344eaefa1790" (UID: "0c3668cb-2285-4d10-8759-344eaefa1790"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.109984 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9ghkt\" (UniqueName: \"kubernetes.io/projected/0c3668cb-2285-4d10-8759-344eaefa1790-kube-api-access-9ghkt\") on node \"crc\" DevicePath \"\"" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.110017 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0c3668cb-2285-4d10-8759-344eaefa1790-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.478842 4884 generic.go:334] "Generic (PLEG): container finished" podID="0c3668cb-2285-4d10-8759-344eaefa1790" containerID="d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960" exitCode=0 Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.478891 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9cg4" event={"ID":"0c3668cb-2285-4d10-8759-344eaefa1790","Type":"ContainerDied","Data":"d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960"} Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.478929 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b9cg4" event={"ID":"0c3668cb-2285-4d10-8759-344eaefa1790","Type":"ContainerDied","Data":"1be0ad9952325972049ac0d723440fc15a193b323bfa8d138538254eaeebea03"} Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.478955 4884 scope.go:117] "RemoveContainer" containerID="d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.479031 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b9cg4" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.506350 4884 scope.go:117] "RemoveContainer" containerID="9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.539893 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b9cg4"] Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.543763 4884 scope.go:117] "RemoveContainer" containerID="1f9c4530f583ab0f8682df45cd72a76557f8db3be5422d5e164378efdbf1d71f" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.553114 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b9cg4"] Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.621904 4884 scope.go:117] "RemoveContainer" containerID="d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960" Dec 10 01:35:32 crc kubenswrapper[4884]: E1210 01:35:32.622406 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960\": container with ID starting with d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960 not found: ID does not exist" containerID="d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.622459 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960"} err="failed to get container status \"d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960\": rpc error: code = NotFound desc = could not find container \"d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960\": container with ID starting with d4b1c2845e5d240ac906383433c3ab833ecccbc02fa1f1c08abc3c1a922c3960 not found: ID does not exist" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.622484 4884 scope.go:117] "RemoveContainer" containerID="9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db" Dec 10 01:35:32 crc kubenswrapper[4884]: E1210 01:35:32.623551 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db\": container with ID starting with 9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db not found: ID does not exist" containerID="9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.623618 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db"} err="failed to get container status \"9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db\": rpc error: code = NotFound desc = could not find container \"9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db\": container with ID starting with 9fc9a1a9c1810ae1d26e5098b36d56a2004f1cb75baff066ecf05b6ea129e7db not found: ID does not exist" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.623654 4884 scope.go:117] "RemoveContainer" containerID="1f9c4530f583ab0f8682df45cd72a76557f8db3be5422d5e164378efdbf1d71f" Dec 10 01:35:32 crc kubenswrapper[4884]: E1210 01:35:32.624028 4884 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"1f9c4530f583ab0f8682df45cd72a76557f8db3be5422d5e164378efdbf1d71f\": container with ID starting with 1f9c4530f583ab0f8682df45cd72a76557f8db3be5422d5e164378efdbf1d71f not found: ID does not exist" containerID="1f9c4530f583ab0f8682df45cd72a76557f8db3be5422d5e164378efdbf1d71f" Dec 10 01:35:32 crc kubenswrapper[4884]: I1210 01:35:32.624060 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f9c4530f583ab0f8682df45cd72a76557f8db3be5422d5e164378efdbf1d71f"} err="failed to get container status \"1f9c4530f583ab0f8682df45cd72a76557f8db3be5422d5e164378efdbf1d71f\": rpc error: code = NotFound desc = could not find container \"1f9c4530f583ab0f8682df45cd72a76557f8db3be5422d5e164378efdbf1d71f\": container with ID starting with 1f9c4530f583ab0f8682df45cd72a76557f8db3be5422d5e164378efdbf1d71f not found: ID does not exist" Dec 10 01:35:33 crc kubenswrapper[4884]: I1210 01:35:33.318136 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c3668cb-2285-4d10-8759-344eaefa1790" path="/var/lib/kubelet/pods/0c3668cb-2285-4d10-8759-344eaefa1790/volumes" Dec 10 01:35:35 crc kubenswrapper[4884]: E1210 01:35:35.288809 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:35:38 crc kubenswrapper[4884]: I1210 01:35:38.288208 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:35:38 crc kubenswrapper[4884]: E1210 01:35:38.289127 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:35:40 crc kubenswrapper[4884]: I1210 01:35:40.290219 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 01:35:40 crc kubenswrapper[4884]: E1210 01:35:40.388000 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:35:40 crc kubenswrapper[4884]: E1210 01:35:40.388045 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:35:40 crc kubenswrapper[4884]: E1210 01:35:40.388145 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:35:40 crc kubenswrapper[4884]: E1210 01:35:40.389400 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:35:48 crc kubenswrapper[4884]: I1210 01:35:48.653879 4884 generic.go:334] "Generic (PLEG): container finished" podID="43fa1b12-f3b7-4bb1-a632-f3f756e29d48" containerID="10c385c9af72f665c74e089ea64e85111612040ebb47e859dea373282edba8a6" exitCode=2 Dec 10 01:35:48 crc kubenswrapper[4884]: I1210 01:35:48.654039 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" event={"ID":"43fa1b12-f3b7-4bb1-a632-f3f756e29d48","Type":"ContainerDied","Data":"10c385c9af72f665c74e089ea64e85111612040ebb47e859dea373282edba8a6"} Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.208201 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.335685 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-0\") pod \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.335794 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-inventory\") pod \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.335999 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ssh-key\") pod \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.336032 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-telemetry-combined-ca-bundle\") pod \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.336170 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fkx8f\" (UniqueName: \"kubernetes.io/projected/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-kube-api-access-fkx8f\") pod \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.336217 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-2\") pod \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.336592 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-1\") pod \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\" (UID: \"43fa1b12-f3b7-4bb1-a632-f3f756e29d48\") " Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 
01:35:50.343047 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "43fa1b12-f3b7-4bb1-a632-f3f756e29d48" (UID: "43fa1b12-f3b7-4bb1-a632-f3f756e29d48"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.352997 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-kube-api-access-fkx8f" (OuterVolumeSpecName: "kube-api-access-fkx8f") pod "43fa1b12-f3b7-4bb1-a632-f3f756e29d48" (UID: "43fa1b12-f3b7-4bb1-a632-f3f756e29d48"). InnerVolumeSpecName "kube-api-access-fkx8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.383593 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "43fa1b12-f3b7-4bb1-a632-f3f756e29d48" (UID: "43fa1b12-f3b7-4bb1-a632-f3f756e29d48"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.389861 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "43fa1b12-f3b7-4bb1-a632-f3f756e29d48" (UID: "43fa1b12-f3b7-4bb1-a632-f3f756e29d48"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.390618 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "43fa1b12-f3b7-4bb1-a632-f3f756e29d48" (UID: "43fa1b12-f3b7-4bb1-a632-f3f756e29d48"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.404936 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "43fa1b12-f3b7-4bb1-a632-f3f756e29d48" (UID: "43fa1b12-f3b7-4bb1-a632-f3f756e29d48"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.406081 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-inventory" (OuterVolumeSpecName: "inventory") pod "43fa1b12-f3b7-4bb1-a632-f3f756e29d48" (UID: "43fa1b12-f3b7-4bb1-a632-f3f756e29d48"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:35:50 crc kubenswrapper[4884]: E1210 01:35:50.412961 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:35:50 crc kubenswrapper[4884]: E1210 01:35:50.413011 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:35:50 crc kubenswrapper[4884]: E1210 01:35:50.413136 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 
/var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:35:50 crc kubenswrapper[4884]: E1210 01:35:50.414314 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.439946 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fkx8f\" (UniqueName: \"kubernetes.io/projected/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-kube-api-access-fkx8f\") on node \"crc\" DevicePath \"\"" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.439997 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.440017 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.440039 4884 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.440060 4884 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-inventory\") on node \"crc\" DevicePath \"\"" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.440078 4884 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 10 01:35:50 
crc kubenswrapper[4884]: I1210 01:35:50.440094 4884 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fa1b12-f3b7-4bb1-a632-f3f756e29d48-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.677198 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" event={"ID":"43fa1b12-f3b7-4bb1-a632-f3f756e29d48","Type":"ContainerDied","Data":"0ebaa737dc565468c28adf01c757efc233e32c59666d31807dcf467abebe91b8"} Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.677235 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0ebaa737dc565468c28adf01c757efc233e32c59666d31807dcf467abebe91b8" Dec 10 01:35:50 crc kubenswrapper[4884]: I1210 01:35:50.677295 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p" Dec 10 01:35:52 crc kubenswrapper[4884]: E1210 01:35:52.290623 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:35:53 crc kubenswrapper[4884]: I1210 01:35:53.287864 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:35:53 crc kubenswrapper[4884]: I1210 01:35:53.720551 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"efec57c3caf1ad234def06035a24d60ab1fe8d81bf7754d85ab7afddcc36f507"} Dec 10 01:36:02 crc kubenswrapper[4884]: E1210 01:36:02.290923 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:36:06 crc kubenswrapper[4884]: E1210 01:36:06.292726 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:36:16 crc kubenswrapper[4884]: E1210 01:36:16.290049 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:36:20 crc kubenswrapper[4884]: E1210 01:36:20.289093 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" 
podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:36:29 crc kubenswrapper[4884]: E1210 01:36:29.293146 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:36:34 crc kubenswrapper[4884]: E1210 01:36:34.291302 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:36:43 crc kubenswrapper[4884]: E1210 01:36:43.289668 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:36:46 crc kubenswrapper[4884]: E1210 01:36:46.291536 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:36:58 crc kubenswrapper[4884]: E1210 01:36:58.290945 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:37:01 crc kubenswrapper[4884]: E1210 01:37:01.290170 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:37:10 crc kubenswrapper[4884]: E1210 01:37:10.290784 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:37:15 crc kubenswrapper[4884]: E1210 01:37:15.290544 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.439084 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dw47v"] Dec 10 01:37:19 crc kubenswrapper[4884]: E1210 01:37:19.442505 4884 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="0c3668cb-2285-4d10-8759-344eaefa1790" containerName="extract-content" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.442524 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c3668cb-2285-4d10-8759-344eaefa1790" containerName="extract-content" Dec 10 01:37:19 crc kubenswrapper[4884]: E1210 01:37:19.442534 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c3668cb-2285-4d10-8759-344eaefa1790" containerName="registry-server" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.442541 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c3668cb-2285-4d10-8759-344eaefa1790" containerName="registry-server" Dec 10 01:37:19 crc kubenswrapper[4884]: E1210 01:37:19.442559 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c3668cb-2285-4d10-8759-344eaefa1790" containerName="extract-utilities" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.442565 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c3668cb-2285-4d10-8759-344eaefa1790" containerName="extract-utilities" Dec 10 01:37:19 crc kubenswrapper[4884]: E1210 01:37:19.442578 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43fa1b12-f3b7-4bb1-a632-f3f756e29d48" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.442585 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="43fa1b12-f3b7-4bb1-a632-f3f756e29d48" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.442791 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c3668cb-2285-4d10-8759-344eaefa1790" containerName="registry-server" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.442810 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="43fa1b12-f3b7-4bb1-a632-f3f756e29d48" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.444381 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.477349 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dw47v"] Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.601936 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af76a29d-2ed1-4589-a84d-774fa01a2645-utilities\") pod \"certified-operators-dw47v\" (UID: \"af76a29d-2ed1-4589-a84d-774fa01a2645\") " pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.602245 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhf64\" (UniqueName: \"kubernetes.io/projected/af76a29d-2ed1-4589-a84d-774fa01a2645-kube-api-access-qhf64\") pod \"certified-operators-dw47v\" (UID: \"af76a29d-2ed1-4589-a84d-774fa01a2645\") " pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.602467 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af76a29d-2ed1-4589-a84d-774fa01a2645-catalog-content\") pod \"certified-operators-dw47v\" (UID: \"af76a29d-2ed1-4589-a84d-774fa01a2645\") " pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.704596 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af76a29d-2ed1-4589-a84d-774fa01a2645-utilities\") pod \"certified-operators-dw47v\" (UID: \"af76a29d-2ed1-4589-a84d-774fa01a2645\") " pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.705182 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhf64\" (UniqueName: \"kubernetes.io/projected/af76a29d-2ed1-4589-a84d-774fa01a2645-kube-api-access-qhf64\") pod \"certified-operators-dw47v\" (UID: \"af76a29d-2ed1-4589-a84d-774fa01a2645\") " pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.705126 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af76a29d-2ed1-4589-a84d-774fa01a2645-utilities\") pod \"certified-operators-dw47v\" (UID: \"af76a29d-2ed1-4589-a84d-774fa01a2645\") " pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.705321 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af76a29d-2ed1-4589-a84d-774fa01a2645-catalog-content\") pod \"certified-operators-dw47v\" (UID: \"af76a29d-2ed1-4589-a84d-774fa01a2645\") " pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.705614 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af76a29d-2ed1-4589-a84d-774fa01a2645-catalog-content\") pod \"certified-operators-dw47v\" (UID: \"af76a29d-2ed1-4589-a84d-774fa01a2645\") " pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.744425 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qhf64\" (UniqueName: \"kubernetes.io/projected/af76a29d-2ed1-4589-a84d-774fa01a2645-kube-api-access-qhf64\") pod \"certified-operators-dw47v\" (UID: \"af76a29d-2ed1-4589-a84d-774fa01a2645\") " pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:19 crc kubenswrapper[4884]: I1210 01:37:19.772562 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:20 crc kubenswrapper[4884]: I1210 01:37:20.286839 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dw47v"] Dec 10 01:37:20 crc kubenswrapper[4884]: W1210 01:37:20.678845 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf76a29d_2ed1_4589_a84d_774fa01a2645.slice/crio-874e21eeb87b0d7210bc589e37dda348940347c7b09e9e45b04e808ac68e488f WatchSource:0}: Error finding container 874e21eeb87b0d7210bc589e37dda348940347c7b09e9e45b04e808ac68e488f: Status 404 returned error can't find the container with id 874e21eeb87b0d7210bc589e37dda348940347c7b09e9e45b04e808ac68e488f Dec 10 01:37:20 crc kubenswrapper[4884]: I1210 01:37:20.965031 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dw47v" event={"ID":"af76a29d-2ed1-4589-a84d-774fa01a2645","Type":"ContainerStarted","Data":"874e21eeb87b0d7210bc589e37dda348940347c7b09e9e45b04e808ac68e488f"} Dec 10 01:37:21 crc kubenswrapper[4884]: I1210 01:37:21.985613 4884 generic.go:334] "Generic (PLEG): container finished" podID="af76a29d-2ed1-4589-a84d-774fa01a2645" containerID="19b951ac9d6628b540c329ec3fd547b6b1a0899e5f736b6e9d89986721bd9090" exitCode=0 Dec 10 01:37:21 crc kubenswrapper[4884]: I1210 01:37:21.985688 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dw47v" event={"ID":"af76a29d-2ed1-4589-a84d-774fa01a2645","Type":"ContainerDied","Data":"19b951ac9d6628b540c329ec3fd547b6b1a0899e5f736b6e9d89986721bd9090"} Dec 10 01:37:24 crc kubenswrapper[4884]: I1210 01:37:24.016366 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dw47v" event={"ID":"af76a29d-2ed1-4589-a84d-774fa01a2645","Type":"ContainerStarted","Data":"b1c90af002eb02b983a85cc1b700dc13fa0aad37c51d951a40205c761d8669a4"} Dec 10 01:37:24 crc kubenswrapper[4884]: E1210 01:37:24.288935 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:37:26 crc kubenswrapper[4884]: I1210 01:37:26.039515 4884 generic.go:334] "Generic (PLEG): container finished" podID="af76a29d-2ed1-4589-a84d-774fa01a2645" containerID="b1c90af002eb02b983a85cc1b700dc13fa0aad37c51d951a40205c761d8669a4" exitCode=0 Dec 10 01:37:26 crc kubenswrapper[4884]: I1210 01:37:26.039715 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dw47v" event={"ID":"af76a29d-2ed1-4589-a84d-774fa01a2645","Type":"ContainerDied","Data":"b1c90af002eb02b983a85cc1b700dc13fa0aad37c51d951a40205c761d8669a4"} Dec 10 01:37:27 crc kubenswrapper[4884]: I1210 01:37:27.058750 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-dw47v" event={"ID":"af76a29d-2ed1-4589-a84d-774fa01a2645","Type":"ContainerStarted","Data":"c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411"} Dec 10 01:37:27 crc kubenswrapper[4884]: I1210 01:37:27.093583 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dw47v" podStartSLOduration=3.446965845 podStartE2EDuration="8.093558834s" podCreationTimestamp="2025-12-10 01:37:19 +0000 UTC" firstStartedPulling="2025-12-10 01:37:21.988533388 +0000 UTC m=+4015.066490515" lastFinishedPulling="2025-12-10 01:37:26.635126347 +0000 UTC m=+4019.713083504" observedRunningTime="2025-12-10 01:37:27.086639899 +0000 UTC m=+4020.164597026" watchObservedRunningTime="2025-12-10 01:37:27.093558834 +0000 UTC m=+4020.171515951" Dec 10 01:37:27 crc kubenswrapper[4884]: E1210 01:37:27.311575 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:37:29 crc kubenswrapper[4884]: I1210 01:37:29.773656 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:29 crc kubenswrapper[4884]: I1210 01:37:29.774033 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:30 crc kubenswrapper[4884]: I1210 01:37:30.842718 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-dw47v" podUID="af76a29d-2ed1-4589-a84d-774fa01a2645" containerName="registry-server" probeResult="failure" output=< Dec 10 01:37:30 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 01:37:30 crc kubenswrapper[4884]: > Dec 10 01:37:37 crc kubenswrapper[4884]: E1210 01:37:37.289689 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:37:39 crc kubenswrapper[4884]: I1210 01:37:39.925649 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:39 crc kubenswrapper[4884]: I1210 01:37:39.985756 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:40 crc kubenswrapper[4884]: I1210 01:37:40.176875 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dw47v"] Dec 10 01:37:41 crc kubenswrapper[4884]: I1210 01:37:41.213247 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dw47v" podUID="af76a29d-2ed1-4589-a84d-774fa01a2645" containerName="registry-server" containerID="cri-o://c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411" gracePeriod=2 Dec 10 01:37:41 crc kubenswrapper[4884]: I1210 01:37:41.883093 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:41 crc kubenswrapper[4884]: I1210 01:37:41.987388 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af76a29d-2ed1-4589-a84d-774fa01a2645-utilities\") pod \"af76a29d-2ed1-4589-a84d-774fa01a2645\" (UID: \"af76a29d-2ed1-4589-a84d-774fa01a2645\") " Dec 10 01:37:41 crc kubenswrapper[4884]: I1210 01:37:41.987615 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af76a29d-2ed1-4589-a84d-774fa01a2645-catalog-content\") pod \"af76a29d-2ed1-4589-a84d-774fa01a2645\" (UID: \"af76a29d-2ed1-4589-a84d-774fa01a2645\") " Dec 10 01:37:41 crc kubenswrapper[4884]: I1210 01:37:41.987646 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhf64\" (UniqueName: \"kubernetes.io/projected/af76a29d-2ed1-4589-a84d-774fa01a2645-kube-api-access-qhf64\") pod \"af76a29d-2ed1-4589-a84d-774fa01a2645\" (UID: \"af76a29d-2ed1-4589-a84d-774fa01a2645\") " Dec 10 01:37:41 crc kubenswrapper[4884]: I1210 01:37:41.988551 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af76a29d-2ed1-4589-a84d-774fa01a2645-utilities" (OuterVolumeSpecName: "utilities") pod "af76a29d-2ed1-4589-a84d-774fa01a2645" (UID: "af76a29d-2ed1-4589-a84d-774fa01a2645"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:37:41 crc kubenswrapper[4884]: I1210 01:37:41.996676 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af76a29d-2ed1-4589-a84d-774fa01a2645-kube-api-access-qhf64" (OuterVolumeSpecName: "kube-api-access-qhf64") pod "af76a29d-2ed1-4589-a84d-774fa01a2645" (UID: "af76a29d-2ed1-4589-a84d-774fa01a2645"). InnerVolumeSpecName "kube-api-access-qhf64". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.050632 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af76a29d-2ed1-4589-a84d-774fa01a2645-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "af76a29d-2ed1-4589-a84d-774fa01a2645" (UID: "af76a29d-2ed1-4589-a84d-774fa01a2645"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.090087 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhf64\" (UniqueName: \"kubernetes.io/projected/af76a29d-2ed1-4589-a84d-774fa01a2645-kube-api-access-qhf64\") on node \"crc\" DevicePath \"\"" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.090124 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af76a29d-2ed1-4589-a84d-774fa01a2645-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.090135 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af76a29d-2ed1-4589-a84d-774fa01a2645-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.226208 4884 generic.go:334] "Generic (PLEG): container finished" podID="af76a29d-2ed1-4589-a84d-774fa01a2645" containerID="c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411" exitCode=0 Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.226246 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dw47v" event={"ID":"af76a29d-2ed1-4589-a84d-774fa01a2645","Type":"ContainerDied","Data":"c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411"} Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.226270 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dw47v" event={"ID":"af76a29d-2ed1-4589-a84d-774fa01a2645","Type":"ContainerDied","Data":"874e21eeb87b0d7210bc589e37dda348940347c7b09e9e45b04e808ac68e488f"} Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.226285 4884 scope.go:117] "RemoveContainer" containerID="c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.226330 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dw47v" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.261564 4884 scope.go:117] "RemoveContainer" containerID="b1c90af002eb02b983a85cc1b700dc13fa0aad37c51d951a40205c761d8669a4" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.271594 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dw47v"] Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.284922 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dw47v"] Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.299720 4884 scope.go:117] "RemoveContainer" containerID="19b951ac9d6628b540c329ec3fd547b6b1a0899e5f736b6e9d89986721bd9090" Dec 10 01:37:42 crc kubenswrapper[4884]: E1210 01:37:42.300070 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.365394 4884 scope.go:117] "RemoveContainer" containerID="c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411" Dec 10 01:37:42 crc kubenswrapper[4884]: E1210 01:37:42.366082 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411\": container with ID starting with c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411 not found: ID does not exist" containerID="c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.366171 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411"} err="failed to get container status \"c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411\": rpc error: code = NotFound desc = could not find container \"c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411\": container with ID starting with c2e02682dbaed9bd65afc712855992d07b351a2930afd825541b87735551c411 not found: ID does not exist" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.366208 4884 scope.go:117] "RemoveContainer" containerID="b1c90af002eb02b983a85cc1b700dc13fa0aad37c51d951a40205c761d8669a4" Dec 10 01:37:42 crc kubenswrapper[4884]: E1210 01:37:42.367000 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1c90af002eb02b983a85cc1b700dc13fa0aad37c51d951a40205c761d8669a4\": container with ID starting with b1c90af002eb02b983a85cc1b700dc13fa0aad37c51d951a40205c761d8669a4 not found: ID does not exist" containerID="b1c90af002eb02b983a85cc1b700dc13fa0aad37c51d951a40205c761d8669a4" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.367029 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1c90af002eb02b983a85cc1b700dc13fa0aad37c51d951a40205c761d8669a4"} err="failed to get container status \"b1c90af002eb02b983a85cc1b700dc13fa0aad37c51d951a40205c761d8669a4\": rpc error: code = NotFound desc = could not find container \"b1c90af002eb02b983a85cc1b700dc13fa0aad37c51d951a40205c761d8669a4\": container with ID starting 
with b1c90af002eb02b983a85cc1b700dc13fa0aad37c51d951a40205c761d8669a4 not found: ID does not exist" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.367046 4884 scope.go:117] "RemoveContainer" containerID="19b951ac9d6628b540c329ec3fd547b6b1a0899e5f736b6e9d89986721bd9090" Dec 10 01:37:42 crc kubenswrapper[4884]: E1210 01:37:42.367340 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19b951ac9d6628b540c329ec3fd547b6b1a0899e5f736b6e9d89986721bd9090\": container with ID starting with 19b951ac9d6628b540c329ec3fd547b6b1a0899e5f736b6e9d89986721bd9090 not found: ID does not exist" containerID="19b951ac9d6628b540c329ec3fd547b6b1a0899e5f736b6e9d89986721bd9090" Dec 10 01:37:42 crc kubenswrapper[4884]: I1210 01:37:42.367497 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19b951ac9d6628b540c329ec3fd547b6b1a0899e5f736b6e9d89986721bd9090"} err="failed to get container status \"19b951ac9d6628b540c329ec3fd547b6b1a0899e5f736b6e9d89986721bd9090\": rpc error: code = NotFound desc = could not find container \"19b951ac9d6628b540c329ec3fd547b6b1a0899e5f736b6e9d89986721bd9090\": container with ID starting with 19b951ac9d6628b540c329ec3fd547b6b1a0899e5f736b6e9d89986721bd9090 not found: ID does not exist" Dec 10 01:37:43 crc kubenswrapper[4884]: I1210 01:37:43.313054 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af76a29d-2ed1-4589-a84d-774fa01a2645" path="/var/lib/kubelet/pods/af76a29d-2ed1-4589-a84d-774fa01a2645/volumes" Dec 10 01:37:48 crc kubenswrapper[4884]: E1210 01:37:48.290753 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:37:54 crc kubenswrapper[4884]: E1210 01:37:54.290185 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:37:59 crc kubenswrapper[4884]: E1210 01:37:59.295728 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:38:09 crc kubenswrapper[4884]: E1210 01:38:09.323422 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:38:13 crc kubenswrapper[4884]: E1210 01:38:13.290778 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" 
pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:38:18 crc kubenswrapper[4884]: I1210 01:38:18.098930 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:38:18 crc kubenswrapper[4884]: I1210 01:38:18.099798 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:38:20 crc kubenswrapper[4884]: E1210 01:38:20.301641 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:38:26 crc kubenswrapper[4884]: E1210 01:38:26.290621 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:38:31 crc kubenswrapper[4884]: E1210 01:38:31.289839 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:38:39 crc kubenswrapper[4884]: E1210 01:38:39.291580 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:38:47 crc kubenswrapper[4884]: E1210 01:38:47.113710 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:38:48 crc kubenswrapper[4884]: I1210 01:38:48.098264 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:38:48 crc kubenswrapper[4884]: I1210 01:38:48.098850 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" Dec 10 01:38:51 crc kubenswrapper[4884]: E1210 01:38:51.290389 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:38:57 crc kubenswrapper[4884]: E1210 01:38:57.301555 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:39:04 crc kubenswrapper[4884]: E1210 01:39:04.291023 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:39:08 crc kubenswrapper[4884]: E1210 01:39:08.288821 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:39:15 crc kubenswrapper[4884]: E1210 01:39:15.289553 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:39:18 crc kubenswrapper[4884]: I1210 01:39:18.098855 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:39:18 crc kubenswrapper[4884]: I1210 01:39:18.099280 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:39:18 crc kubenswrapper[4884]: I1210 01:39:18.099348 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 01:39:18 crc kubenswrapper[4884]: I1210 01:39:18.100422 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"efec57c3caf1ad234def06035a24d60ab1fe8d81bf7754d85ab7afddcc36f507"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 01:39:18 crc kubenswrapper[4884]: I1210 01:39:18.100640 4884 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://efec57c3caf1ad234def06035a24d60ab1fe8d81bf7754d85ab7afddcc36f507" gracePeriod=600 Dec 10 01:39:18 crc kubenswrapper[4884]: I1210 01:39:18.422500 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="efec57c3caf1ad234def06035a24d60ab1fe8d81bf7754d85ab7afddcc36f507" exitCode=0 Dec 10 01:39:18 crc kubenswrapper[4884]: I1210 01:39:18.422542 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"efec57c3caf1ad234def06035a24d60ab1fe8d81bf7754d85ab7afddcc36f507"} Dec 10 01:39:18 crc kubenswrapper[4884]: I1210 01:39:18.422637 4884 scope.go:117] "RemoveContainer" containerID="72bd2e849615a773327e0cecc60f7c82e8702bf7bceafcc37b0eb91f0436c50f" Dec 10 01:39:19 crc kubenswrapper[4884]: I1210 01:39:19.445329 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc"} Dec 10 01:39:20 crc kubenswrapper[4884]: E1210 01:39:20.291037 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:39:29 crc kubenswrapper[4884]: E1210 01:39:29.290699 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:39:32 crc kubenswrapper[4884]: E1210 01:39:32.289999 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:39:43 crc kubenswrapper[4884]: E1210 01:39:43.289668 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:39:47 crc kubenswrapper[4884]: E1210 01:39:47.154901 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:39:57 crc kubenswrapper[4884]: E1210 01:39:57.291016 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:40:02 crc kubenswrapper[4884]: E1210 01:40:02.291882 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:40:10 crc kubenswrapper[4884]: E1210 01:40:10.290170 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:40:17 crc kubenswrapper[4884]: E1210 01:40:17.305733 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.387966 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rwwzv"] Dec 10 01:40:19 crc kubenswrapper[4884]: E1210 01:40:19.388818 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af76a29d-2ed1-4589-a84d-774fa01a2645" containerName="registry-server" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.388834 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="af76a29d-2ed1-4589-a84d-774fa01a2645" containerName="registry-server" Dec 10 01:40:19 crc kubenswrapper[4884]: E1210 01:40:19.388846 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af76a29d-2ed1-4589-a84d-774fa01a2645" containerName="extract-utilities" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.388856 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="af76a29d-2ed1-4589-a84d-774fa01a2645" containerName="extract-utilities" Dec 10 01:40:19 crc kubenswrapper[4884]: E1210 01:40:19.388909 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af76a29d-2ed1-4589-a84d-774fa01a2645" containerName="extract-content" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.388919 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="af76a29d-2ed1-4589-a84d-774fa01a2645" containerName="extract-content" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.389226 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="af76a29d-2ed1-4589-a84d-774fa01a2645" containerName="registry-server" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.391134 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.403515 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rwwzv"] Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.499752 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rj7k2\" (UniqueName: \"kubernetes.io/projected/6615d252-b80f-4c3e-8150-4412c0e8e7b6-kube-api-access-rj7k2\") pod \"community-operators-rwwzv\" (UID: \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\") " pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.499888 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6615d252-b80f-4c3e-8150-4412c0e8e7b6-utilities\") pod \"community-operators-rwwzv\" (UID: \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\") " pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.499920 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6615d252-b80f-4c3e-8150-4412c0e8e7b6-catalog-content\") pod \"community-operators-rwwzv\" (UID: \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\") " pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.602196 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6615d252-b80f-4c3e-8150-4412c0e8e7b6-utilities\") pod \"community-operators-rwwzv\" (UID: \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\") " pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.602267 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6615d252-b80f-4c3e-8150-4412c0e8e7b6-catalog-content\") pod \"community-operators-rwwzv\" (UID: \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\") " pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.602373 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rj7k2\" (UniqueName: \"kubernetes.io/projected/6615d252-b80f-4c3e-8150-4412c0e8e7b6-kube-api-access-rj7k2\") pod \"community-operators-rwwzv\" (UID: \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\") " pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.602815 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6615d252-b80f-4c3e-8150-4412c0e8e7b6-catalog-content\") pod \"community-operators-rwwzv\" (UID: \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\") " pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.603338 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6615d252-b80f-4c3e-8150-4412c0e8e7b6-utilities\") pod \"community-operators-rwwzv\" (UID: \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\") " pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:19 crc kubenswrapper[4884]: I1210 01:40:19.972144 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rj7k2\" (UniqueName: \"kubernetes.io/projected/6615d252-b80f-4c3e-8150-4412c0e8e7b6-kube-api-access-rj7k2\") pod \"community-operators-rwwzv\" (UID: \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\") " pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:20 crc kubenswrapper[4884]: I1210 01:40:20.018628 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:20 crc kubenswrapper[4884]: I1210 01:40:20.508817 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rwwzv"] Dec 10 01:40:20 crc kubenswrapper[4884]: I1210 01:40:20.656064 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rwwzv" event={"ID":"6615d252-b80f-4c3e-8150-4412c0e8e7b6","Type":"ContainerStarted","Data":"e95dd1db3dbecd8c95a663b99c54515bd2cf134fb391c533c71cd0c477e5ebc6"} Dec 10 01:40:21 crc kubenswrapper[4884]: I1210 01:40:21.673146 4884 generic.go:334] "Generic (PLEG): container finished" podID="6615d252-b80f-4c3e-8150-4412c0e8e7b6" containerID="7c235979e658d13d6cce2ccbc250a9faa78d256713a7304aad523ee258d1ecdc" exitCode=0 Dec 10 01:40:21 crc kubenswrapper[4884]: I1210 01:40:21.673231 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rwwzv" event={"ID":"6615d252-b80f-4c3e-8150-4412c0e8e7b6","Type":"ContainerDied","Data":"7c235979e658d13d6cce2ccbc250a9faa78d256713a7304aad523ee258d1ecdc"} Dec 10 01:40:22 crc kubenswrapper[4884]: E1210 01:40:22.288979 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:40:22 crc kubenswrapper[4884]: I1210 01:40:22.693070 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rwwzv" event={"ID":"6615d252-b80f-4c3e-8150-4412c0e8e7b6","Type":"ContainerStarted","Data":"12046976f14a07d1f85f698d0c62ad6135cf936dfef3c666494d5e836373c9ec"} Dec 10 01:40:23 crc kubenswrapper[4884]: I1210 01:40:23.706859 4884 generic.go:334] "Generic (PLEG): container finished" podID="6615d252-b80f-4c3e-8150-4412c0e8e7b6" containerID="12046976f14a07d1f85f698d0c62ad6135cf936dfef3c666494d5e836373c9ec" exitCode=0 Dec 10 01:40:23 crc kubenswrapper[4884]: I1210 01:40:23.706915 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rwwzv" event={"ID":"6615d252-b80f-4c3e-8150-4412c0e8e7b6","Type":"ContainerDied","Data":"12046976f14a07d1f85f698d0c62ad6135cf936dfef3c666494d5e836373c9ec"} Dec 10 01:40:25 crc kubenswrapper[4884]: I1210 01:40:25.731183 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rwwzv" event={"ID":"6615d252-b80f-4c3e-8150-4412c0e8e7b6","Type":"ContainerStarted","Data":"3495ae4b502a0906d922e059e4d1813e207ad3344d73ce805cfc1f0632794e32"} Dec 10 01:40:25 crc kubenswrapper[4884]: I1210 01:40:25.757714 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rwwzv" podStartSLOduration=3.83105025 podStartE2EDuration="6.757696242s" podCreationTimestamp="2025-12-10 01:40:19 +0000 UTC" firstStartedPulling="2025-12-10 
01:40:21.676119296 +0000 UTC m=+4194.754076423" lastFinishedPulling="2025-12-10 01:40:24.602765268 +0000 UTC m=+4197.680722415" observedRunningTime="2025-12-10 01:40:25.754679461 +0000 UTC m=+4198.832636588" watchObservedRunningTime="2025-12-10 01:40:25.757696242 +0000 UTC m=+4198.835653359" Dec 10 01:40:30 crc kubenswrapper[4884]: I1210 01:40:30.018728 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:30 crc kubenswrapper[4884]: I1210 01:40:30.019261 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:30 crc kubenswrapper[4884]: I1210 01:40:30.538805 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:30 crc kubenswrapper[4884]: I1210 01:40:30.862713 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:30 crc kubenswrapper[4884]: I1210 01:40:30.927464 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rwwzv"] Dec 10 01:40:32 crc kubenswrapper[4884]: E1210 01:40:32.290955 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:40:32 crc kubenswrapper[4884]: I1210 01:40:32.821259 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rwwzv" podUID="6615d252-b80f-4c3e-8150-4412c0e8e7b6" containerName="registry-server" containerID="cri-o://3495ae4b502a0906d922e059e4d1813e207ad3344d73ce805cfc1f0632794e32" gracePeriod=2 Dec 10 01:40:33 crc kubenswrapper[4884]: I1210 01:40:33.838757 4884 generic.go:334] "Generic (PLEG): container finished" podID="6615d252-b80f-4c3e-8150-4412c0e8e7b6" containerID="3495ae4b502a0906d922e059e4d1813e207ad3344d73ce805cfc1f0632794e32" exitCode=0 Dec 10 01:40:33 crc kubenswrapper[4884]: I1210 01:40:33.838880 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rwwzv" event={"ID":"6615d252-b80f-4c3e-8150-4412c0e8e7b6","Type":"ContainerDied","Data":"3495ae4b502a0906d922e059e4d1813e207ad3344d73ce805cfc1f0632794e32"} Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.142274 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.175039 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6615d252-b80f-4c3e-8150-4412c0e8e7b6-catalog-content\") pod \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\" (UID: \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\") " Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.175186 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rj7k2\" (UniqueName: \"kubernetes.io/projected/6615d252-b80f-4c3e-8150-4412c0e8e7b6-kube-api-access-rj7k2\") pod \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\" (UID: \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\") " Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.175215 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6615d252-b80f-4c3e-8150-4412c0e8e7b6-utilities\") pod \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\" (UID: \"6615d252-b80f-4c3e-8150-4412c0e8e7b6\") " Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.176460 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6615d252-b80f-4c3e-8150-4412c0e8e7b6-utilities" (OuterVolumeSpecName: "utilities") pod "6615d252-b80f-4c3e-8150-4412c0e8e7b6" (UID: "6615d252-b80f-4c3e-8150-4412c0e8e7b6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.194771 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6615d252-b80f-4c3e-8150-4412c0e8e7b6-kube-api-access-rj7k2" (OuterVolumeSpecName: "kube-api-access-rj7k2") pod "6615d252-b80f-4c3e-8150-4412c0e8e7b6" (UID: "6615d252-b80f-4c3e-8150-4412c0e8e7b6"). InnerVolumeSpecName "kube-api-access-rj7k2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.243562 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6615d252-b80f-4c3e-8150-4412c0e8e7b6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6615d252-b80f-4c3e-8150-4412c0e8e7b6" (UID: "6615d252-b80f-4c3e-8150-4412c0e8e7b6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.277633 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6615d252-b80f-4c3e-8150-4412c0e8e7b6-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.277662 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rj7k2\" (UniqueName: \"kubernetes.io/projected/6615d252-b80f-4c3e-8150-4412c0e8e7b6-kube-api-access-rj7k2\") on node \"crc\" DevicePath \"\"" Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.277688 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6615d252-b80f-4c3e-8150-4412c0e8e7b6-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.861397 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rwwzv" event={"ID":"6615d252-b80f-4c3e-8150-4412c0e8e7b6","Type":"ContainerDied","Data":"e95dd1db3dbecd8c95a663b99c54515bd2cf134fb391c533c71cd0c477e5ebc6"} Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.861512 4884 scope.go:117] "RemoveContainer" containerID="3495ae4b502a0906d922e059e4d1813e207ad3344d73ce805cfc1f0632794e32" Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.861574 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rwwzv" Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.897469 4884 scope.go:117] "RemoveContainer" containerID="12046976f14a07d1f85f698d0c62ad6135cf936dfef3c666494d5e836373c9ec" Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.943033 4884 scope.go:117] "RemoveContainer" containerID="7c235979e658d13d6cce2ccbc250a9faa78d256713a7304aad523ee258d1ecdc" Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.943170 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rwwzv"] Dec 10 01:40:34 crc kubenswrapper[4884]: I1210 01:40:34.956290 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rwwzv"] Dec 10 01:40:35 crc kubenswrapper[4884]: I1210 01:40:35.315021 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6615d252-b80f-4c3e-8150-4412c0e8e7b6" path="/var/lib/kubelet/pods/6615d252-b80f-4c3e-8150-4412c0e8e7b6/volumes" Dec 10 01:40:36 crc kubenswrapper[4884]: E1210 01:40:36.294000 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:40:47 crc kubenswrapper[4884]: I1210 01:40:47.307956 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 01:40:47 crc kubenswrapper[4884]: E1210 01:40:47.414335 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:40:47 crc kubenswrapper[4884]: E1210 01:40:47.414892 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:40:47 crc kubenswrapper[4884]: E1210 01:40:47.415141 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 01:40:47 crc kubenswrapper[4884]: E1210 01:40:47.416962 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:40:49 crc kubenswrapper[4884]: E1210 01:40:49.290492 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:40:58 crc kubenswrapper[4884]: E1210 01:40:58.291612 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:41:02 crc kubenswrapper[4884]: E1210 01:41:02.417051 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:41:02 crc kubenswrapper[4884]: E1210 01:41:02.417694 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:41:02 crc kubenswrapper[4884]: E1210 01:41:02.417893 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:41:02 crc kubenswrapper[4884]: E1210 01:41:02.419074 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:41:13 crc kubenswrapper[4884]: E1210 01:41:13.290691 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:41:13 crc kubenswrapper[4884]: E1210 01:41:13.290881 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:41:18 crc kubenswrapper[4884]: I1210 01:41:18.098635 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:41:18 crc kubenswrapper[4884]: I1210 01:41:18.099305 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:41:26 crc kubenswrapper[4884]: E1210 01:41:26.291538 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:41:27 crc kubenswrapper[4884]: E1210 01:41:27.300231 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:41:38 crc kubenswrapper[4884]: E1210 01:41:38.292053 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:41:38 crc kubenswrapper[4884]: E1210 01:41:38.292049 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:41:48 crc kubenswrapper[4884]: I1210 01:41:48.098427 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:41:48 crc kubenswrapper[4884]: I1210 01:41:48.098995 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:41:51 crc kubenswrapper[4884]: E1210 01:41:51.291333 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:41:52 crc kubenswrapper[4884]: E1210 01:41:52.288683 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:42:03 crc kubenswrapper[4884]: E1210 01:42:03.291784 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:42:05 crc kubenswrapper[4884]: E1210 01:42:05.293132 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:42:17 crc kubenswrapper[4884]: E1210 01:42:17.307737 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:42:18 crc kubenswrapper[4884]: I1210 01:42:18.098716 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:42:18 crc kubenswrapper[4884]: I1210 01:42:18.099036 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:42:18 crc kubenswrapper[4884]: I1210 01:42:18.099085 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 01:42:18 crc kubenswrapper[4884]: I1210 01:42:18.100076 4884 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 01:42:18 crc kubenswrapper[4884]: I1210 01:42:18.100177 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" gracePeriod=600 Dec 10 01:42:18 crc kubenswrapper[4884]: E1210 01:42:18.221342 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:42:18 crc kubenswrapper[4884]: I1210 01:42:18.323725 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" exitCode=0 Dec 10 01:42:18 crc kubenswrapper[4884]: I1210 01:42:18.323772 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc"} Dec 10 01:42:18 crc kubenswrapper[4884]: I1210 01:42:18.323808 4884 scope.go:117] "RemoveContainer" containerID="efec57c3caf1ad234def06035a24d60ab1fe8d81bf7754d85ab7afddcc36f507" Dec 10 01:42:18 crc kubenswrapper[4884]: I1210 01:42:18.324607 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:42:18 crc kubenswrapper[4884]: E1210 01:42:18.325044 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:42:20 crc kubenswrapper[4884]: E1210 01:42:20.292156 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:42:28 crc kubenswrapper[4884]: E1210 01:42:28.291187 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:42:33 crc kubenswrapper[4884]: I1210 
01:42:33.288147 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:42:33 crc kubenswrapper[4884]: E1210 01:42:33.289067 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:42:34 crc kubenswrapper[4884]: E1210 01:42:34.290555 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:42:39 crc kubenswrapper[4884]: E1210 01:42:39.291187 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:42:47 crc kubenswrapper[4884]: E1210 01:42:47.291070 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:42:48 crc kubenswrapper[4884]: I1210 01:42:48.287644 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:42:48 crc kubenswrapper[4884]: E1210 01:42:48.288147 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:42:53 crc kubenswrapper[4884]: E1210 01:42:53.291391 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:42:59 crc kubenswrapper[4884]: E1210 01:42:59.289352 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:43:00 crc kubenswrapper[4884]: I1210 01:43:00.288698 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:43:00 crc kubenswrapper[4884]: E1210 01:43:00.289373 4884 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:43:05 crc kubenswrapper[4884]: E1210 01:43:05.290736 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:43:12 crc kubenswrapper[4884]: I1210 01:43:12.287677 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:43:12 crc kubenswrapper[4884]: E1210 01:43:12.289196 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:43:12 crc kubenswrapper[4884]: E1210 01:43:12.289715 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:43:19 crc kubenswrapper[4884]: E1210 01:43:19.290516 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:43:25 crc kubenswrapper[4884]: I1210 01:43:25.287847 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:43:25 crc kubenswrapper[4884]: E1210 01:43:25.288740 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:43:27 crc kubenswrapper[4884]: E1210 01:43:27.309741 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:43:34 crc kubenswrapper[4884]: E1210 01:43:34.290147 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:43:38 crc kubenswrapper[4884]: I1210 01:43:38.287136 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:43:38 crc kubenswrapper[4884]: E1210 01:43:38.287920 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:43:39 crc kubenswrapper[4884]: E1210 01:43:39.291350 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:43:46 crc kubenswrapper[4884]: E1210 01:43:46.291203 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:43:50 crc kubenswrapper[4884]: E1210 01:43:50.288635 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.288505 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:43:51 crc kubenswrapper[4884]: E1210 01:43:51.289361 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.462519 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cmbkv"] Dec 10 01:43:51 crc kubenswrapper[4884]: E1210 01:43:51.463248 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6615d252-b80f-4c3e-8150-4412c0e8e7b6" containerName="registry-server" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.463265 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6615d252-b80f-4c3e-8150-4412c0e8e7b6" containerName="registry-server" Dec 10 01:43:51 crc kubenswrapper[4884]: E1210 01:43:51.463300 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6615d252-b80f-4c3e-8150-4412c0e8e7b6" 
containerName="extract-content" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.463310 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6615d252-b80f-4c3e-8150-4412c0e8e7b6" containerName="extract-content" Dec 10 01:43:51 crc kubenswrapper[4884]: E1210 01:43:51.463360 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6615d252-b80f-4c3e-8150-4412c0e8e7b6" containerName="extract-utilities" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.463368 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6615d252-b80f-4c3e-8150-4412c0e8e7b6" containerName="extract-utilities" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.463681 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6615d252-b80f-4c3e-8150-4412c0e8e7b6" containerName="registry-server" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.466137 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.488068 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cmbkv"] Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.522298 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ghh9\" (UniqueName: \"kubernetes.io/projected/7c663236-01a6-41db-8af8-45c9bb76dbe3-kube-api-access-6ghh9\") pod \"redhat-marketplace-cmbkv\" (UID: \"7c663236-01a6-41db-8af8-45c9bb76dbe3\") " pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.522394 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c663236-01a6-41db-8af8-45c9bb76dbe3-catalog-content\") pod \"redhat-marketplace-cmbkv\" (UID: \"7c663236-01a6-41db-8af8-45c9bb76dbe3\") " pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.522490 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c663236-01a6-41db-8af8-45c9bb76dbe3-utilities\") pod \"redhat-marketplace-cmbkv\" (UID: \"7c663236-01a6-41db-8af8-45c9bb76dbe3\") " pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.624734 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ghh9\" (UniqueName: \"kubernetes.io/projected/7c663236-01a6-41db-8af8-45c9bb76dbe3-kube-api-access-6ghh9\") pod \"redhat-marketplace-cmbkv\" (UID: \"7c663236-01a6-41db-8af8-45c9bb76dbe3\") " pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.624838 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c663236-01a6-41db-8af8-45c9bb76dbe3-catalog-content\") pod \"redhat-marketplace-cmbkv\" (UID: \"7c663236-01a6-41db-8af8-45c9bb76dbe3\") " pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.624904 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c663236-01a6-41db-8af8-45c9bb76dbe3-utilities\") pod \"redhat-marketplace-cmbkv\" (UID: 
\"7c663236-01a6-41db-8af8-45c9bb76dbe3\") " pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.625474 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c663236-01a6-41db-8af8-45c9bb76dbe3-catalog-content\") pod \"redhat-marketplace-cmbkv\" (UID: \"7c663236-01a6-41db-8af8-45c9bb76dbe3\") " pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.625534 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c663236-01a6-41db-8af8-45c9bb76dbe3-utilities\") pod \"redhat-marketplace-cmbkv\" (UID: \"7c663236-01a6-41db-8af8-45c9bb76dbe3\") " pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.658556 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ghh9\" (UniqueName: \"kubernetes.io/projected/7c663236-01a6-41db-8af8-45c9bb76dbe3-kube-api-access-6ghh9\") pod \"redhat-marketplace-cmbkv\" (UID: \"7c663236-01a6-41db-8af8-45c9bb76dbe3\") " pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:43:51 crc kubenswrapper[4884]: I1210 01:43:51.808627 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:43:52 crc kubenswrapper[4884]: I1210 01:43:52.344319 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cmbkv"] Dec 10 01:43:52 crc kubenswrapper[4884]: I1210 01:43:52.592157 4884 generic.go:334] "Generic (PLEG): container finished" podID="7c663236-01a6-41db-8af8-45c9bb76dbe3" containerID="41a665b3a23d81227c1983d1ed0acee91763d3757034c50e1733a0e961a358e2" exitCode=0 Dec 10 01:43:52 crc kubenswrapper[4884]: I1210 01:43:52.592264 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cmbkv" event={"ID":"7c663236-01a6-41db-8af8-45c9bb76dbe3","Type":"ContainerDied","Data":"41a665b3a23d81227c1983d1ed0acee91763d3757034c50e1733a0e961a358e2"} Dec 10 01:43:52 crc kubenswrapper[4884]: I1210 01:43:52.593347 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cmbkv" event={"ID":"7c663236-01a6-41db-8af8-45c9bb76dbe3","Type":"ContainerStarted","Data":"4d911482ae5ae79202de7ce0d07aff0376b94b9ce60ffc4d854a59fea8de752a"} Dec 10 01:43:54 crc kubenswrapper[4884]: I1210 01:43:54.620316 4884 generic.go:334] "Generic (PLEG): container finished" podID="7c663236-01a6-41db-8af8-45c9bb76dbe3" containerID="b24c55c1e97740a24069c7776da1a7dd432de1600a210382dcfeeb33f3ad87d5" exitCode=0 Dec 10 01:43:54 crc kubenswrapper[4884]: I1210 01:43:54.620373 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cmbkv" event={"ID":"7c663236-01a6-41db-8af8-45c9bb76dbe3","Type":"ContainerDied","Data":"b24c55c1e97740a24069c7776da1a7dd432de1600a210382dcfeeb33f3ad87d5"} Dec 10 01:43:55 crc kubenswrapper[4884]: I1210 01:43:55.630847 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cmbkv" event={"ID":"7c663236-01a6-41db-8af8-45c9bb76dbe3","Type":"ContainerStarted","Data":"d8f096cdb0c2f0d7a229f2d12b18c74db4ace85ec258a4bf04d2afaa2b60a491"} Dec 10 01:43:55 crc kubenswrapper[4884]: I1210 01:43:55.659493 4884 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-marketplace/redhat-marketplace-cmbkv" podStartSLOduration=2.00336349 podStartE2EDuration="4.65947108s" podCreationTimestamp="2025-12-10 01:43:51 +0000 UTC" firstStartedPulling="2025-12-10 01:43:52.594005178 +0000 UTC m=+4405.671962315" lastFinishedPulling="2025-12-10 01:43:55.250112738 +0000 UTC m=+4408.328069905" observedRunningTime="2025-12-10 01:43:55.651449605 +0000 UTC m=+4408.729406742" watchObservedRunningTime="2025-12-10 01:43:55.65947108 +0000 UTC m=+4408.737428207" Dec 10 01:43:57 crc kubenswrapper[4884]: E1210 01:43:57.296361 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:44:01 crc kubenswrapper[4884]: I1210 01:44:01.809541 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:44:01 crc kubenswrapper[4884]: I1210 01:44:01.810077 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:44:01 crc kubenswrapper[4884]: I1210 01:44:01.877755 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:44:02 crc kubenswrapper[4884]: I1210 01:44:02.287940 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:44:02 crc kubenswrapper[4884]: E1210 01:44:02.288547 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:44:02 crc kubenswrapper[4884]: I1210 01:44:02.788370 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:44:02 crc kubenswrapper[4884]: I1210 01:44:02.846060 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cmbkv"] Dec 10 01:44:04 crc kubenswrapper[4884]: I1210 01:44:04.734367 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cmbkv" podUID="7c663236-01a6-41db-8af8-45c9bb76dbe3" containerName="registry-server" containerID="cri-o://d8f096cdb0c2f0d7a229f2d12b18c74db4ace85ec258a4bf04d2afaa2b60a491" gracePeriod=2 Dec 10 01:44:05 crc kubenswrapper[4884]: E1210 01:44:05.289598 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:44:05 crc kubenswrapper[4884]: I1210 01:44:05.797852 4884 generic.go:334] "Generic (PLEG): container finished" podID="7c663236-01a6-41db-8af8-45c9bb76dbe3" containerID="d8f096cdb0c2f0d7a229f2d12b18c74db4ace85ec258a4bf04d2afaa2b60a491" exitCode=0 Dec 10 01:44:05 
crc kubenswrapper[4884]: I1210 01:44:05.797943 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cmbkv" event={"ID":"7c663236-01a6-41db-8af8-45c9bb76dbe3","Type":"ContainerDied","Data":"d8f096cdb0c2f0d7a229f2d12b18c74db4ace85ec258a4bf04d2afaa2b60a491"} Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.214524 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.287890 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c663236-01a6-41db-8af8-45c9bb76dbe3-catalog-content\") pod \"7c663236-01a6-41db-8af8-45c9bb76dbe3\" (UID: \"7c663236-01a6-41db-8af8-45c9bb76dbe3\") " Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.288157 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ghh9\" (UniqueName: \"kubernetes.io/projected/7c663236-01a6-41db-8af8-45c9bb76dbe3-kube-api-access-6ghh9\") pod \"7c663236-01a6-41db-8af8-45c9bb76dbe3\" (UID: \"7c663236-01a6-41db-8af8-45c9bb76dbe3\") " Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.288246 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c663236-01a6-41db-8af8-45c9bb76dbe3-utilities\") pod \"7c663236-01a6-41db-8af8-45c9bb76dbe3\" (UID: \"7c663236-01a6-41db-8af8-45c9bb76dbe3\") " Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.289121 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c663236-01a6-41db-8af8-45c9bb76dbe3-utilities" (OuterVolumeSpecName: "utilities") pod "7c663236-01a6-41db-8af8-45c9bb76dbe3" (UID: "7c663236-01a6-41db-8af8-45c9bb76dbe3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.289424 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7c663236-01a6-41db-8af8-45c9bb76dbe3-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.315266 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c663236-01a6-41db-8af8-45c9bb76dbe3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7c663236-01a6-41db-8af8-45c9bb76dbe3" (UID: "7c663236-01a6-41db-8af8-45c9bb76dbe3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.368782 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c663236-01a6-41db-8af8-45c9bb76dbe3-kube-api-access-6ghh9" (OuterVolumeSpecName: "kube-api-access-6ghh9") pod "7c663236-01a6-41db-8af8-45c9bb76dbe3" (UID: "7c663236-01a6-41db-8af8-45c9bb76dbe3"). InnerVolumeSpecName "kube-api-access-6ghh9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.392288 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ghh9\" (UniqueName: \"kubernetes.io/projected/7c663236-01a6-41db-8af8-45c9bb76dbe3-kube-api-access-6ghh9\") on node \"crc\" DevicePath \"\"" Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.392338 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7c663236-01a6-41db-8af8-45c9bb76dbe3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.830065 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cmbkv" event={"ID":"7c663236-01a6-41db-8af8-45c9bb76dbe3","Type":"ContainerDied","Data":"4d911482ae5ae79202de7ce0d07aff0376b94b9ce60ffc4d854a59fea8de752a"} Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.830607 4884 scope.go:117] "RemoveContainer" containerID="d8f096cdb0c2f0d7a229f2d12b18c74db4ace85ec258a4bf04d2afaa2b60a491" Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.830129 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cmbkv" Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.859313 4884 scope.go:117] "RemoveContainer" containerID="b24c55c1e97740a24069c7776da1a7dd432de1600a210382dcfeeb33f3ad87d5" Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.896905 4884 scope.go:117] "RemoveContainer" containerID="41a665b3a23d81227c1983d1ed0acee91763d3757034c50e1733a0e961a358e2" Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.899851 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cmbkv"] Dec 10 01:44:06 crc kubenswrapper[4884]: I1210 01:44:06.912055 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cmbkv"] Dec 10 01:44:07 crc kubenswrapper[4884]: I1210 01:44:07.311243 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c663236-01a6-41db-8af8-45c9bb76dbe3" path="/var/lib/kubelet/pods/7c663236-01a6-41db-8af8-45c9bb76dbe3/volumes" Dec 10 01:44:12 crc kubenswrapper[4884]: E1210 01:44:12.289834 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:44:15 crc kubenswrapper[4884]: I1210 01:44:15.287142 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:44:15 crc kubenswrapper[4884]: E1210 01:44:15.287694 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:44:19 crc kubenswrapper[4884]: E1210 01:44:19.290698 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling 
image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:44:26 crc kubenswrapper[4884]: E1210 01:44:26.290479 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:44:29 crc kubenswrapper[4884]: I1210 01:44:29.289759 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:44:29 crc kubenswrapper[4884]: E1210 01:44:29.291663 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:44:34 crc kubenswrapper[4884]: E1210 01:44:34.290130 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:44:41 crc kubenswrapper[4884]: E1210 01:44:41.291207 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:44:42 crc kubenswrapper[4884]: I1210 01:44:42.287885 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:44:42 crc kubenswrapper[4884]: E1210 01:44:42.288604 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:44:46 crc kubenswrapper[4884]: E1210 01:44:46.293375 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:44:55 crc kubenswrapper[4884]: I1210 01:44:55.287206 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:44:55 crc kubenswrapper[4884]: E1210 01:44:55.288248 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:44:55 crc kubenswrapper[4884]: E1210 01:44:55.290219 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.175858 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp"] Dec 10 01:45:00 crc kubenswrapper[4884]: E1210 01:45:00.176993 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c663236-01a6-41db-8af8-45c9bb76dbe3" containerName="extract-utilities" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.177011 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c663236-01a6-41db-8af8-45c9bb76dbe3" containerName="extract-utilities" Dec 10 01:45:00 crc kubenswrapper[4884]: E1210 01:45:00.177038 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c663236-01a6-41db-8af8-45c9bb76dbe3" containerName="extract-content" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.177049 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c663236-01a6-41db-8af8-45c9bb76dbe3" containerName="extract-content" Dec 10 01:45:00 crc kubenswrapper[4884]: E1210 01:45:00.177069 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c663236-01a6-41db-8af8-45c9bb76dbe3" containerName="registry-server" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.177079 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c663236-01a6-41db-8af8-45c9bb76dbe3" containerName="registry-server" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.177387 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c663236-01a6-41db-8af8-45c9bb76dbe3" containerName="registry-server" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.178572 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.181272 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.181309 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.192180 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp"] Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.277121 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8ec42a4b-b66b-48c3-ac2d-755c59d71034-config-volume\") pod \"collect-profiles-29422185-zzlvp\" (UID: \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.277160 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8ec42a4b-b66b-48c3-ac2d-755c59d71034-secret-volume\") pod \"collect-profiles-29422185-zzlvp\" (UID: \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.277206 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxznm\" (UniqueName: \"kubernetes.io/projected/8ec42a4b-b66b-48c3-ac2d-755c59d71034-kube-api-access-xxznm\") pod \"collect-profiles-29422185-zzlvp\" (UID: \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:00 crc kubenswrapper[4884]: E1210 01:45:00.292109 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.379725 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8ec42a4b-b66b-48c3-ac2d-755c59d71034-config-volume\") pod \"collect-profiles-29422185-zzlvp\" (UID: \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.379772 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8ec42a4b-b66b-48c3-ac2d-755c59d71034-secret-volume\") pod \"collect-profiles-29422185-zzlvp\" (UID: \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.379820 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxznm\" (UniqueName: \"kubernetes.io/projected/8ec42a4b-b66b-48c3-ac2d-755c59d71034-kube-api-access-xxznm\") pod 
\"collect-profiles-29422185-zzlvp\" (UID: \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.381402 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8ec42a4b-b66b-48c3-ac2d-755c59d71034-config-volume\") pod \"collect-profiles-29422185-zzlvp\" (UID: \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.394055 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8ec42a4b-b66b-48c3-ac2d-755c59d71034-secret-volume\") pod \"collect-profiles-29422185-zzlvp\" (UID: \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.397769 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxznm\" (UniqueName: \"kubernetes.io/projected/8ec42a4b-b66b-48c3-ac2d-755c59d71034-kube-api-access-xxznm\") pod \"collect-profiles-29422185-zzlvp\" (UID: \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:00 crc kubenswrapper[4884]: I1210 01:45:00.511821 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:01 crc kubenswrapper[4884]: I1210 01:45:01.068690 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp"] Dec 10 01:45:01 crc kubenswrapper[4884]: I1210 01:45:01.624607 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" event={"ID":"8ec42a4b-b66b-48c3-ac2d-755c59d71034","Type":"ContainerStarted","Data":"eedb259eb5bd60c2608a0269553fb37e512a874730f1ea939960ab0f547fb421"} Dec 10 01:45:01 crc kubenswrapper[4884]: I1210 01:45:01.625148 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" event={"ID":"8ec42a4b-b66b-48c3-ac2d-755c59d71034","Type":"ContainerStarted","Data":"6053fe470a9d1de8c28f5ab1371d9c44603c9f3a960da135e6a093ad9b6a57ff"} Dec 10 01:45:01 crc kubenswrapper[4884]: I1210 01:45:01.649916 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" podStartSLOduration=1.649896649 podStartE2EDuration="1.649896649s" podCreationTimestamp="2025-12-10 01:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 01:45:01.64246936 +0000 UTC m=+4474.720426497" watchObservedRunningTime="2025-12-10 01:45:01.649896649 +0000 UTC m=+4474.727853766" Dec 10 01:45:02 crc kubenswrapper[4884]: I1210 01:45:02.638185 4884 generic.go:334] "Generic (PLEG): container finished" podID="8ec42a4b-b66b-48c3-ac2d-755c59d71034" containerID="eedb259eb5bd60c2608a0269553fb37e512a874730f1ea939960ab0f547fb421" exitCode=0 Dec 10 01:45:02 crc kubenswrapper[4884]: I1210 01:45:02.638518 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" event={"ID":"8ec42a4b-b66b-48c3-ac2d-755c59d71034","Type":"ContainerDied","Data":"eedb259eb5bd60c2608a0269553fb37e512a874730f1ea939960ab0f547fb421"} Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.084324 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.197922 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8ec42a4b-b66b-48c3-ac2d-755c59d71034-secret-volume\") pod \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\" (UID: \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\") " Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.198111 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8ec42a4b-b66b-48c3-ac2d-755c59d71034-config-volume\") pod \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\" (UID: \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\") " Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.198153 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxznm\" (UniqueName: \"kubernetes.io/projected/8ec42a4b-b66b-48c3-ac2d-755c59d71034-kube-api-access-xxznm\") pod \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\" (UID: \"8ec42a4b-b66b-48c3-ac2d-755c59d71034\") " Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.199577 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ec42a4b-b66b-48c3-ac2d-755c59d71034-config-volume" (OuterVolumeSpecName: "config-volume") pod "8ec42a4b-b66b-48c3-ac2d-755c59d71034" (UID: "8ec42a4b-b66b-48c3-ac2d-755c59d71034"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.203911 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8ec42a4b-b66b-48c3-ac2d-755c59d71034-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8ec42a4b-b66b-48c3-ac2d-755c59d71034" (UID: "8ec42a4b-b66b-48c3-ac2d-755c59d71034"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.204621 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ec42a4b-b66b-48c3-ac2d-755c59d71034-kube-api-access-xxznm" (OuterVolumeSpecName: "kube-api-access-xxznm") pod "8ec42a4b-b66b-48c3-ac2d-755c59d71034" (UID: "8ec42a4b-b66b-48c3-ac2d-755c59d71034"). InnerVolumeSpecName "kube-api-access-xxznm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.302246 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8ec42a4b-b66b-48c3-ac2d-755c59d71034-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.303224 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8ec42a4b-b66b-48c3-ac2d-755c59d71034-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.303332 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxznm\" (UniqueName: \"kubernetes.io/projected/8ec42a4b-b66b-48c3-ac2d-755c59d71034-kube-api-access-xxznm\") on node \"crc\" DevicePath \"\"" Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.677819 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" event={"ID":"8ec42a4b-b66b-48c3-ac2d-755c59d71034","Type":"ContainerDied","Data":"6053fe470a9d1de8c28f5ab1371d9c44603c9f3a960da135e6a093ad9b6a57ff"} Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.678180 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6053fe470a9d1de8c28f5ab1371d9c44603c9f3a960da135e6a093ad9b6a57ff" Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.677857 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422185-zzlvp" Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.742035 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2"] Dec 10 01:45:04 crc kubenswrapper[4884]: I1210 01:45:04.752770 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422140-thtq2"] Dec 10 01:45:05 crc kubenswrapper[4884]: I1210 01:45:05.312371 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823" path="/var/lib/kubelet/pods/e1bcbf7f-9d7b-4c94-99c2-a01b6f1ba823/volumes" Dec 10 01:45:08 crc kubenswrapper[4884]: I1210 01:45:08.288042 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:45:08 crc kubenswrapper[4884]: E1210 01:45:08.288976 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:45:10 crc kubenswrapper[4884]: E1210 01:45:10.289142 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:45:13 crc kubenswrapper[4884]: E1210 01:45:13.289184 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.784444 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-f55c2"] Dec 10 01:45:18 crc kubenswrapper[4884]: E1210 01:45:18.785535 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ec42a4b-b66b-48c3-ac2d-755c59d71034" containerName="collect-profiles" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.785551 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ec42a4b-b66b-48c3-ac2d-755c59d71034" containerName="collect-profiles" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.785767 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ec42a4b-b66b-48c3-ac2d-755c59d71034" containerName="collect-profiles" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.787345 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.806413 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-f55c2"] Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.818640 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31503ebe-10b3-4004-b790-9986dc671df3-utilities\") pod \"redhat-operators-f55c2\" (UID: \"31503ebe-10b3-4004-b790-9986dc671df3\") " pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.824512 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br2nc\" (UniqueName: \"kubernetes.io/projected/31503ebe-10b3-4004-b790-9986dc671df3-kube-api-access-br2nc\") pod \"redhat-operators-f55c2\" (UID: \"31503ebe-10b3-4004-b790-9986dc671df3\") " pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.824753 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31503ebe-10b3-4004-b790-9986dc671df3-catalog-content\") pod \"redhat-operators-f55c2\" (UID: \"31503ebe-10b3-4004-b790-9986dc671df3\") " pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.927545 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31503ebe-10b3-4004-b790-9986dc671df3-utilities\") pod \"redhat-operators-f55c2\" (UID: \"31503ebe-10b3-4004-b790-9986dc671df3\") " pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.927659 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br2nc\" (UniqueName: \"kubernetes.io/projected/31503ebe-10b3-4004-b790-9986dc671df3-kube-api-access-br2nc\") pod \"redhat-operators-f55c2\" (UID: \"31503ebe-10b3-4004-b790-9986dc671df3\") " pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.927797 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/31503ebe-10b3-4004-b790-9986dc671df3-catalog-content\") pod \"redhat-operators-f55c2\" (UID: \"31503ebe-10b3-4004-b790-9986dc671df3\") " pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.928409 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31503ebe-10b3-4004-b790-9986dc671df3-catalog-content\") pod \"redhat-operators-f55c2\" (UID: \"31503ebe-10b3-4004-b790-9986dc671df3\") " pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.928414 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31503ebe-10b3-4004-b790-9986dc671df3-utilities\") pod \"redhat-operators-f55c2\" (UID: \"31503ebe-10b3-4004-b790-9986dc671df3\") " pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:18 crc kubenswrapper[4884]: I1210 01:45:18.955543 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br2nc\" (UniqueName: \"kubernetes.io/projected/31503ebe-10b3-4004-b790-9986dc671df3-kube-api-access-br2nc\") pod \"redhat-operators-f55c2\" (UID: \"31503ebe-10b3-4004-b790-9986dc671df3\") " pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:19 crc kubenswrapper[4884]: I1210 01:45:19.111521 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:19 crc kubenswrapper[4884]: I1210 01:45:19.625569 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-f55c2"] Dec 10 01:45:19 crc kubenswrapper[4884]: W1210 01:45:19.644553 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31503ebe_10b3_4004_b790_9986dc671df3.slice/crio-8cf4291ae3dcaa6ed7f46715eefda2e4d3a3af1da859556d8c03c758d3211810 WatchSource:0}: Error finding container 8cf4291ae3dcaa6ed7f46715eefda2e4d3a3af1da859556d8c03c758d3211810: Status 404 returned error can't find the container with id 8cf4291ae3dcaa6ed7f46715eefda2e4d3a3af1da859556d8c03c758d3211810 Dec 10 01:45:19 crc kubenswrapper[4884]: I1210 01:45:19.875880 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f55c2" event={"ID":"31503ebe-10b3-4004-b790-9986dc671df3","Type":"ContainerStarted","Data":"8cf4291ae3dcaa6ed7f46715eefda2e4d3a3af1da859556d8c03c758d3211810"} Dec 10 01:45:20 crc kubenswrapper[4884]: I1210 01:45:20.892204 4884 generic.go:334] "Generic (PLEG): container finished" podID="31503ebe-10b3-4004-b790-9986dc671df3" containerID="4cb39d89ab04244ed863f6061c38a25a77a692b3a04878708eb8ec5dcf98414e" exitCode=0 Dec 10 01:45:20 crc kubenswrapper[4884]: I1210 01:45:20.892308 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f55c2" event={"ID":"31503ebe-10b3-4004-b790-9986dc671df3","Type":"ContainerDied","Data":"4cb39d89ab04244ed863f6061c38a25a77a692b3a04878708eb8ec5dcf98414e"} Dec 10 01:45:21 crc kubenswrapper[4884]: E1210 01:45:21.289735 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:45:21 crc kubenswrapper[4884]: I1210 01:45:21.910170 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f55c2" event={"ID":"31503ebe-10b3-4004-b790-9986dc671df3","Type":"ContainerStarted","Data":"4edcdf68551b1f363fa44766fd4c3b1021b315d2085b8cac20b678d3d02b86fc"} Dec 10 01:45:23 crc kubenswrapper[4884]: I1210 01:45:23.287717 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:45:23 crc kubenswrapper[4884]: E1210 01:45:23.288371 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:45:24 crc kubenswrapper[4884]: I1210 01:45:24.954656 4884 generic.go:334] "Generic (PLEG): container finished" podID="31503ebe-10b3-4004-b790-9986dc671df3" containerID="4edcdf68551b1f363fa44766fd4c3b1021b315d2085b8cac20b678d3d02b86fc" exitCode=0 Dec 10 01:45:24 crc kubenswrapper[4884]: I1210 01:45:24.954739 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f55c2" event={"ID":"31503ebe-10b3-4004-b790-9986dc671df3","Type":"ContainerDied","Data":"4edcdf68551b1f363fa44766fd4c3b1021b315d2085b8cac20b678d3d02b86fc"} Dec 10 01:45:26 crc kubenswrapper[4884]: I1210 01:45:26.988121 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f55c2" event={"ID":"31503ebe-10b3-4004-b790-9986dc671df3","Type":"ContainerStarted","Data":"2ba83caaa63c79289c71154ba6d667c9b5ee012c885fdb2ea47a53dff8ed0b11"} Dec 10 01:45:27 crc kubenswrapper[4884]: I1210 01:45:27.020346 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-f55c2" podStartSLOduration=4.176335206 podStartE2EDuration="9.020319636s" podCreationTimestamp="2025-12-10 01:45:18 +0000 UTC" firstStartedPulling="2025-12-10 01:45:20.896128083 +0000 UTC m=+4493.974085220" lastFinishedPulling="2025-12-10 01:45:25.740112533 +0000 UTC m=+4498.818069650" observedRunningTime="2025-12-10 01:45:27.01526041 +0000 UTC m=+4500.093217567" watchObservedRunningTime="2025-12-10 01:45:27.020319636 +0000 UTC m=+4500.098276773" Dec 10 01:45:27 crc kubenswrapper[4884]: E1210 01:45:27.300283 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:45:29 crc kubenswrapper[4884]: I1210 01:45:29.112324 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:29 crc kubenswrapper[4884]: I1210 01:45:29.112754 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:31 crc kubenswrapper[4884]: I1210 01:45:31.138666 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-f55c2" podUID="31503ebe-10b3-4004-b790-9986dc671df3" 
containerName="registry-server" probeResult="failure" output=< Dec 10 01:45:31 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 01:45:31 crc kubenswrapper[4884]: > Dec 10 01:45:34 crc kubenswrapper[4884]: E1210 01:45:34.292964 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:45:38 crc kubenswrapper[4884]: I1210 01:45:38.290168 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:45:38 crc kubenswrapper[4884]: E1210 01:45:38.291118 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:45:38 crc kubenswrapper[4884]: E1210 01:45:38.294255 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:45:39 crc kubenswrapper[4884]: I1210 01:45:39.195026 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:39 crc kubenswrapper[4884]: I1210 01:45:39.283667 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:39 crc kubenswrapper[4884]: I1210 01:45:39.452979 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-f55c2"] Dec 10 01:45:41 crc kubenswrapper[4884]: I1210 01:45:41.169626 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-f55c2" podUID="31503ebe-10b3-4004-b790-9986dc671df3" containerName="registry-server" containerID="cri-o://2ba83caaa63c79289c71154ba6d667c9b5ee012c885fdb2ea47a53dff8ed0b11" gracePeriod=2 Dec 10 01:45:42 crc kubenswrapper[4884]: I1210 01:45:42.182170 4884 generic.go:334] "Generic (PLEG): container finished" podID="31503ebe-10b3-4004-b790-9986dc671df3" containerID="2ba83caaa63c79289c71154ba6d667c9b5ee012c885fdb2ea47a53dff8ed0b11" exitCode=0 Dec 10 01:45:42 crc kubenswrapper[4884]: I1210 01:45:42.182223 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f55c2" event={"ID":"31503ebe-10b3-4004-b790-9986dc671df3","Type":"ContainerDied","Data":"2ba83caaa63c79289c71154ba6d667c9b5ee012c885fdb2ea47a53dff8ed0b11"} Dec 10 01:45:42 crc kubenswrapper[4884]: I1210 01:45:42.421277 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:42 crc kubenswrapper[4884]: I1210 01:45:42.468172 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31503ebe-10b3-4004-b790-9986dc671df3-utilities\") pod \"31503ebe-10b3-4004-b790-9986dc671df3\" (UID: \"31503ebe-10b3-4004-b790-9986dc671df3\") " Dec 10 01:45:42 crc kubenswrapper[4884]: I1210 01:45:42.468524 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br2nc\" (UniqueName: \"kubernetes.io/projected/31503ebe-10b3-4004-b790-9986dc671df3-kube-api-access-br2nc\") pod \"31503ebe-10b3-4004-b790-9986dc671df3\" (UID: \"31503ebe-10b3-4004-b790-9986dc671df3\") " Dec 10 01:45:42 crc kubenswrapper[4884]: I1210 01:45:42.468584 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31503ebe-10b3-4004-b790-9986dc671df3-catalog-content\") pod \"31503ebe-10b3-4004-b790-9986dc671df3\" (UID: \"31503ebe-10b3-4004-b790-9986dc671df3\") " Dec 10 01:45:42 crc kubenswrapper[4884]: I1210 01:45:42.470045 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31503ebe-10b3-4004-b790-9986dc671df3-utilities" (OuterVolumeSpecName: "utilities") pod "31503ebe-10b3-4004-b790-9986dc671df3" (UID: "31503ebe-10b3-4004-b790-9986dc671df3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:45:42 crc kubenswrapper[4884]: I1210 01:45:42.486713 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31503ebe-10b3-4004-b790-9986dc671df3-kube-api-access-br2nc" (OuterVolumeSpecName: "kube-api-access-br2nc") pod "31503ebe-10b3-4004-b790-9986dc671df3" (UID: "31503ebe-10b3-4004-b790-9986dc671df3"). InnerVolumeSpecName "kube-api-access-br2nc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:45:42 crc kubenswrapper[4884]: I1210 01:45:42.571761 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31503ebe-10b3-4004-b790-9986dc671df3-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:45:42 crc kubenswrapper[4884]: I1210 01:45:42.571813 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br2nc\" (UniqueName: \"kubernetes.io/projected/31503ebe-10b3-4004-b790-9986dc671df3-kube-api-access-br2nc\") on node \"crc\" DevicePath \"\"" Dec 10 01:45:42 crc kubenswrapper[4884]: I1210 01:45:42.611588 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31503ebe-10b3-4004-b790-9986dc671df3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "31503ebe-10b3-4004-b790-9986dc671df3" (UID: "31503ebe-10b3-4004-b790-9986dc671df3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:45:42 crc kubenswrapper[4884]: I1210 01:45:42.673961 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31503ebe-10b3-4004-b790-9986dc671df3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:45:43 crc kubenswrapper[4884]: I1210 01:45:43.200499 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-f55c2" event={"ID":"31503ebe-10b3-4004-b790-9986dc671df3","Type":"ContainerDied","Data":"8cf4291ae3dcaa6ed7f46715eefda2e4d3a3af1da859556d8c03c758d3211810"} Dec 10 01:45:43 crc kubenswrapper[4884]: I1210 01:45:43.200569 4884 scope.go:117] "RemoveContainer" containerID="2ba83caaa63c79289c71154ba6d667c9b5ee012c885fdb2ea47a53dff8ed0b11" Dec 10 01:45:43 crc kubenswrapper[4884]: I1210 01:45:43.200565 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-f55c2" Dec 10 01:45:43 crc kubenswrapper[4884]: I1210 01:45:43.223004 4884 scope.go:117] "RemoveContainer" containerID="4edcdf68551b1f363fa44766fd4c3b1021b315d2085b8cac20b678d3d02b86fc" Dec 10 01:45:43 crc kubenswrapper[4884]: I1210 01:45:43.247633 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-f55c2"] Dec 10 01:45:43 crc kubenswrapper[4884]: I1210 01:45:43.248131 4884 scope.go:117] "RemoveContainer" containerID="4cb39d89ab04244ed863f6061c38a25a77a692b3a04878708eb8ec5dcf98414e" Dec 10 01:45:43 crc kubenswrapper[4884]: I1210 01:45:43.257919 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-f55c2"] Dec 10 01:45:43 crc kubenswrapper[4884]: I1210 01:45:43.299583 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31503ebe-10b3-4004-b790-9986dc671df3" path="/var/lib/kubelet/pods/31503ebe-10b3-4004-b790-9986dc671df3/volumes" Dec 10 01:45:43 crc kubenswrapper[4884]: I1210 01:45:43.366404 4884 scope.go:117] "RemoveContainer" containerID="1fed379f7bc6284328a126c0c54af4f8c812a3f1b854cdd0b3c607bb245cbfac" Dec 10 01:45:48 crc kubenswrapper[4884]: E1210 01:45:48.290139 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:45:52 crc kubenswrapper[4884]: I1210 01:45:52.287796 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:45:52 crc kubenswrapper[4884]: E1210 01:45:52.288964 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:45:53 crc kubenswrapper[4884]: I1210 01:45:53.291374 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 01:45:53 crc kubenswrapper[4884]: E1210 01:45:53.430801 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = 
initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:45:53 crc kubenswrapper[4884]: E1210 01:45:53.430902 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:45:53 crc kubenswrapper[4884]: E1210 01:45:53.431139 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" logger="UnhandledError" Dec 10 01:45:53 crc kubenswrapper[4884]: E1210 01:45:53.432416 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:45:59 crc kubenswrapper[4884]: E1210 01:45:59.290916 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:46:04 crc kubenswrapper[4884]: I1210 01:46:04.287320 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:46:04 crc kubenswrapper[4884]: E1210 01:46:04.288383 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:46:07 crc kubenswrapper[4884]: E1210 01:46:07.306046 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:46:13 crc kubenswrapper[4884]: E1210 01:46:13.409897 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:46:13 crc kubenswrapper[4884]: E1210 01:46:13.410500 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:46:13 crc kubenswrapper[4884]: E1210 01:46:13.410655 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:46:13 crc kubenswrapper[4884]: E1210 01:46:13.412084 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:46:16 crc kubenswrapper[4884]: I1210 01:46:16.287877 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:46:16 crc kubenswrapper[4884]: E1210 01:46:16.288733 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:46:20 crc kubenswrapper[4884]: E1210 01:46:20.289736 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:46:27 crc kubenswrapper[4884]: E1210 01:46:27.309666 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:46:30 crc kubenswrapper[4884]: I1210 01:46:30.289287 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:46:30 crc kubenswrapper[4884]: E1210 01:46:30.290170 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:46:34 crc kubenswrapper[4884]: E1210 01:46:34.290356 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:46:41 crc kubenswrapper[4884]: E1210 01:46:41.291140 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:46:43 crc kubenswrapper[4884]: I1210 01:46:43.287173 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:46:43 crc kubenswrapper[4884]: E1210 01:46:43.287772 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:46:45 crc kubenswrapper[4884]: E1210 01:46:45.289387 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:46:52 crc kubenswrapper[4884]: E1210 01:46:52.290409 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:46:55 crc kubenswrapper[4884]: I1210 01:46:55.289053 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:46:55 crc kubenswrapper[4884]: E1210 01:46:55.290160 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:46:57 crc kubenswrapper[4884]: E1210 01:46:57.315236 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:47:04 crc kubenswrapper[4884]: E1210 01:47:04.292252 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:47:09 crc kubenswrapper[4884]: I1210 01:47:09.286750 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:47:09 crc kubenswrapper[4884]: E1210 01:47:09.288533 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:47:11 crc kubenswrapper[4884]: E1210 01:47:11.291579 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" 
podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:47:16 crc kubenswrapper[4884]: E1210 01:47:16.290605 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:47:21 crc kubenswrapper[4884]: I1210 01:47:21.293687 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:47:22 crc kubenswrapper[4884]: I1210 01:47:22.487917 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"58a7edeffae4f0630f7567a47e1eb77a4163a79cf5c63ee8195427687620eb48"} Dec 10 01:47:23 crc kubenswrapper[4884]: E1210 01:47:23.290749 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:47:29 crc kubenswrapper[4884]: E1210 01:47:29.289706 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:47:35 crc kubenswrapper[4884]: E1210 01:47:35.291765 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:47:43 crc kubenswrapper[4884]: E1210 01:47:43.291378 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:47:49 crc kubenswrapper[4884]: E1210 01:47:49.293704 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:47:57 crc kubenswrapper[4884]: E1210 01:47:57.308863 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:48:02 crc kubenswrapper[4884]: E1210 01:48:02.290500 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:48:10 crc kubenswrapper[4884]: E1210 01:48:10.288806 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:48:16 crc kubenswrapper[4884]: E1210 01:48:16.290561 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:48:21 crc kubenswrapper[4884]: E1210 01:48:21.291861 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:48:28 crc kubenswrapper[4884]: E1210 01:48:28.290384 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:48:34 crc kubenswrapper[4884]: E1210 01:48:34.291045 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:48:39 crc kubenswrapper[4884]: E1210 01:48:39.289524 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:48:48 crc kubenswrapper[4884]: E1210 01:48:48.289479 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:48:52 crc kubenswrapper[4884]: E1210 01:48:52.289752 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:48:59 crc kubenswrapper[4884]: E1210 01:48:59.298523 4884 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:49:06 crc kubenswrapper[4884]: E1210 01:49:06.288857 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:49:10 crc kubenswrapper[4884]: E1210 01:49:10.291590 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:49:17 crc kubenswrapper[4884]: E1210 01:49:17.304839 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:49:25 crc kubenswrapper[4884]: E1210 01:49:25.289233 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:49:28 crc kubenswrapper[4884]: E1210 01:49:28.289637 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:49:39 crc kubenswrapper[4884]: E1210 01:49:39.289979 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:49:42 crc kubenswrapper[4884]: E1210 01:49:42.289247 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:49:48 crc kubenswrapper[4884]: I1210 01:49:48.098119 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:49:48 crc kubenswrapper[4884]: I1210 01:49:48.098796 4884 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:49:52 crc kubenswrapper[4884]: E1210 01:49:52.290271 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:49:55 crc kubenswrapper[4884]: E1210 01:49:55.289588 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:50:04 crc kubenswrapper[4884]: E1210 01:50:04.291093 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:50:06 crc kubenswrapper[4884]: E1210 01:50:06.291707 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:50:18 crc kubenswrapper[4884]: I1210 01:50:18.098479 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:50:18 crc kubenswrapper[4884]: I1210 01:50:18.099116 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:50:18 crc kubenswrapper[4884]: E1210 01:50:18.289854 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.564916 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2w2kb"] Dec 10 01:50:19 crc kubenswrapper[4884]: E1210 01:50:19.565870 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31503ebe-10b3-4004-b790-9986dc671df3" containerName="extract-utilities" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.565903 4884 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="31503ebe-10b3-4004-b790-9986dc671df3" containerName="extract-utilities" Dec 10 01:50:19 crc kubenswrapper[4884]: E1210 01:50:19.565983 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31503ebe-10b3-4004-b790-9986dc671df3" containerName="extract-content" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.566004 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="31503ebe-10b3-4004-b790-9986dc671df3" containerName="extract-content" Dec 10 01:50:19 crc kubenswrapper[4884]: E1210 01:50:19.566066 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31503ebe-10b3-4004-b790-9986dc671df3" containerName="registry-server" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.566083 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="31503ebe-10b3-4004-b790-9986dc671df3" containerName="registry-server" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.566619 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="31503ebe-10b3-4004-b790-9986dc671df3" containerName="registry-server" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.570620 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.587716 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2w2kb"] Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.641895 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5af84785-e4e4-424b-886c-791385803a02-catalog-content\") pod \"community-operators-2w2kb\" (UID: \"5af84785-e4e4-424b-886c-791385803a02\") " pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.641936 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql6vt\" (UniqueName: \"kubernetes.io/projected/5af84785-e4e4-424b-886c-791385803a02-kube-api-access-ql6vt\") pod \"community-operators-2w2kb\" (UID: \"5af84785-e4e4-424b-886c-791385803a02\") " pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.642011 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5af84785-e4e4-424b-886c-791385803a02-utilities\") pod \"community-operators-2w2kb\" (UID: \"5af84785-e4e4-424b-886c-791385803a02\") " pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.742666 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5af84785-e4e4-424b-886c-791385803a02-catalog-content\") pod \"community-operators-2w2kb\" (UID: \"5af84785-e4e4-424b-886c-791385803a02\") " pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.743019 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ql6vt\" (UniqueName: \"kubernetes.io/projected/5af84785-e4e4-424b-886c-791385803a02-kube-api-access-ql6vt\") pod \"community-operators-2w2kb\" (UID: \"5af84785-e4e4-424b-886c-791385803a02\") " pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:19 crc 
kubenswrapper[4884]: I1210 01:50:19.743116 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5af84785-e4e4-424b-886c-791385803a02-utilities\") pod \"community-operators-2w2kb\" (UID: \"5af84785-e4e4-424b-886c-791385803a02\") " pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.743265 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5af84785-e4e4-424b-886c-791385803a02-catalog-content\") pod \"community-operators-2w2kb\" (UID: \"5af84785-e4e4-424b-886c-791385803a02\") " pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.743603 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5af84785-e4e4-424b-886c-791385803a02-utilities\") pod \"community-operators-2w2kb\" (UID: \"5af84785-e4e4-424b-886c-791385803a02\") " pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.767266 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ql6vt\" (UniqueName: \"kubernetes.io/projected/5af84785-e4e4-424b-886c-791385803a02-kube-api-access-ql6vt\") pod \"community-operators-2w2kb\" (UID: \"5af84785-e4e4-424b-886c-791385803a02\") " pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:19 crc kubenswrapper[4884]: I1210 01:50:19.938155 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:20 crc kubenswrapper[4884]: E1210 01:50:20.293791 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:50:20 crc kubenswrapper[4884]: I1210 01:50:20.436097 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2w2kb"] Dec 10 01:50:20 crc kubenswrapper[4884]: I1210 01:50:20.825172 4884 generic.go:334] "Generic (PLEG): container finished" podID="5af84785-e4e4-424b-886c-791385803a02" containerID="a967ef44c5c9f03df70dafc6b75ddb57ef7e26489f291acf3a2f7cd6af708c89" exitCode=0 Dec 10 01:50:20 crc kubenswrapper[4884]: I1210 01:50:20.825262 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2w2kb" event={"ID":"5af84785-e4e4-424b-886c-791385803a02","Type":"ContainerDied","Data":"a967ef44c5c9f03df70dafc6b75ddb57ef7e26489f291acf3a2f7cd6af708c89"} Dec 10 01:50:20 crc kubenswrapper[4884]: I1210 01:50:20.825541 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2w2kb" event={"ID":"5af84785-e4e4-424b-886c-791385803a02","Type":"ContainerStarted","Data":"ea9aeafb879fbd42ccd77ff4787c53a2c391afbe862b60d6355cd01f7eeafd97"} Dec 10 01:50:22 crc kubenswrapper[4884]: I1210 01:50:22.863334 4884 generic.go:334] "Generic (PLEG): container finished" podID="5af84785-e4e4-424b-886c-791385803a02" containerID="23c857bee0e2cd2e24870b83460c9b0b35d2c59c32efc96a9450b1e397027b4a" exitCode=0 Dec 10 01:50:22 crc kubenswrapper[4884]: I1210 01:50:22.863415 4884 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/community-operators-2w2kb" event={"ID":"5af84785-e4e4-424b-886c-791385803a02","Type":"ContainerDied","Data":"23c857bee0e2cd2e24870b83460c9b0b35d2c59c32efc96a9450b1e397027b4a"} Dec 10 01:50:23 crc kubenswrapper[4884]: I1210 01:50:23.880547 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2w2kb" event={"ID":"5af84785-e4e4-424b-886c-791385803a02","Type":"ContainerStarted","Data":"cf0b95db7e6d7a2e693ee2ade7dc9791075cfde4c6942279fd8457c1dde8aeb7"} Dec 10 01:50:23 crc kubenswrapper[4884]: I1210 01:50:23.915451 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2w2kb" podStartSLOduration=2.188918729 podStartE2EDuration="4.915412986s" podCreationTimestamp="2025-12-10 01:50:19 +0000 UTC" firstStartedPulling="2025-12-10 01:50:20.827998236 +0000 UTC m=+4793.905955363" lastFinishedPulling="2025-12-10 01:50:23.554492483 +0000 UTC m=+4796.632449620" observedRunningTime="2025-12-10 01:50:23.909711254 +0000 UTC m=+4796.987668391" watchObservedRunningTime="2025-12-10 01:50:23.915412986 +0000 UTC m=+4796.993370093" Dec 10 01:50:29 crc kubenswrapper[4884]: I1210 01:50:29.938576 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:29 crc kubenswrapper[4884]: I1210 01:50:29.939275 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:30 crc kubenswrapper[4884]: I1210 01:50:30.086850 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:31 crc kubenswrapper[4884]: I1210 01:50:31.052083 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:31 crc kubenswrapper[4884]: I1210 01:50:31.116895 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2w2kb"] Dec 10 01:50:32 crc kubenswrapper[4884]: I1210 01:50:32.992149 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2w2kb" podUID="5af84785-e4e4-424b-886c-791385803a02" containerName="registry-server" containerID="cri-o://cf0b95db7e6d7a2e693ee2ade7dc9791075cfde4c6942279fd8457c1dde8aeb7" gracePeriod=2 Dec 10 01:50:33 crc kubenswrapper[4884]: E1210 01:50:33.289191 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:50:34 crc kubenswrapper[4884]: I1210 01:50:34.006701 4884 generic.go:334] "Generic (PLEG): container finished" podID="5af84785-e4e4-424b-886c-791385803a02" containerID="cf0b95db7e6d7a2e693ee2ade7dc9791075cfde4c6942279fd8457c1dde8aeb7" exitCode=0 Dec 10 01:50:34 crc kubenswrapper[4884]: I1210 01:50:34.006758 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2w2kb" event={"ID":"5af84785-e4e4-424b-886c-791385803a02","Type":"ContainerDied","Data":"cf0b95db7e6d7a2e693ee2ade7dc9791075cfde4c6942279fd8457c1dde8aeb7"} Dec 10 01:50:34 crc kubenswrapper[4884]: I1210 01:50:34.118657 4884 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:34 crc kubenswrapper[4884]: I1210 01:50:34.206161 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5af84785-e4e4-424b-886c-791385803a02-catalog-content\") pod \"5af84785-e4e4-424b-886c-791385803a02\" (UID: \"5af84785-e4e4-424b-886c-791385803a02\") " Dec 10 01:50:34 crc kubenswrapper[4884]: I1210 01:50:34.206264 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5af84785-e4e4-424b-886c-791385803a02-utilities\") pod \"5af84785-e4e4-424b-886c-791385803a02\" (UID: \"5af84785-e4e4-424b-886c-791385803a02\") " Dec 10 01:50:34 crc kubenswrapper[4884]: I1210 01:50:34.206547 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ql6vt\" (UniqueName: \"kubernetes.io/projected/5af84785-e4e4-424b-886c-791385803a02-kube-api-access-ql6vt\") pod \"5af84785-e4e4-424b-886c-791385803a02\" (UID: \"5af84785-e4e4-424b-886c-791385803a02\") " Dec 10 01:50:34 crc kubenswrapper[4884]: I1210 01:50:34.207555 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5af84785-e4e4-424b-886c-791385803a02-utilities" (OuterVolumeSpecName: "utilities") pod "5af84785-e4e4-424b-886c-791385803a02" (UID: "5af84785-e4e4-424b-886c-791385803a02"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:50:34 crc kubenswrapper[4884]: I1210 01:50:34.212538 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5af84785-e4e4-424b-886c-791385803a02-kube-api-access-ql6vt" (OuterVolumeSpecName: "kube-api-access-ql6vt") pod "5af84785-e4e4-424b-886c-791385803a02" (UID: "5af84785-e4e4-424b-886c-791385803a02"). InnerVolumeSpecName "kube-api-access-ql6vt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:50:34 crc kubenswrapper[4884]: I1210 01:50:34.277346 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5af84785-e4e4-424b-886c-791385803a02-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5af84785-e4e4-424b-886c-791385803a02" (UID: "5af84785-e4e4-424b-886c-791385803a02"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:50:34 crc kubenswrapper[4884]: I1210 01:50:34.310325 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ql6vt\" (UniqueName: \"kubernetes.io/projected/5af84785-e4e4-424b-886c-791385803a02-kube-api-access-ql6vt\") on node \"crc\" DevicePath \"\"" Dec 10 01:50:34 crc kubenswrapper[4884]: I1210 01:50:34.310348 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5af84785-e4e4-424b-886c-791385803a02-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:50:34 crc kubenswrapper[4884]: I1210 01:50:34.310357 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5af84785-e4e4-424b-886c-791385803a02-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:50:35 crc kubenswrapper[4884]: I1210 01:50:35.024323 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2w2kb" event={"ID":"5af84785-e4e4-424b-886c-791385803a02","Type":"ContainerDied","Data":"ea9aeafb879fbd42ccd77ff4787c53a2c391afbe862b60d6355cd01f7eeafd97"} Dec 10 01:50:35 crc kubenswrapper[4884]: I1210 01:50:35.024377 4884 scope.go:117] "RemoveContainer" containerID="cf0b95db7e6d7a2e693ee2ade7dc9791075cfde4c6942279fd8457c1dde8aeb7" Dec 10 01:50:35 crc kubenswrapper[4884]: I1210 01:50:35.024510 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2w2kb" Dec 10 01:50:35 crc kubenswrapper[4884]: I1210 01:50:35.056847 4884 scope.go:117] "RemoveContainer" containerID="23c857bee0e2cd2e24870b83460c9b0b35d2c59c32efc96a9450b1e397027b4a" Dec 10 01:50:35 crc kubenswrapper[4884]: I1210 01:50:35.080365 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2w2kb"] Dec 10 01:50:35 crc kubenswrapper[4884]: I1210 01:50:35.092275 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2w2kb"] Dec 10 01:50:35 crc kubenswrapper[4884]: E1210 01:50:35.310657 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:50:35 crc kubenswrapper[4884]: I1210 01:50:35.311552 4884 scope.go:117] "RemoveContainer" containerID="a967ef44c5c9f03df70dafc6b75ddb57ef7e26489f291acf3a2f7cd6af708c89" Dec 10 01:50:35 crc kubenswrapper[4884]: I1210 01:50:35.319581 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5af84785-e4e4-424b-886c-791385803a02" path="/var/lib/kubelet/pods/5af84785-e4e4-424b-886c-791385803a02/volumes" Dec 10 01:50:46 crc kubenswrapper[4884]: E1210 01:50:46.290738 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:50:47 crc kubenswrapper[4884]: E1210 01:50:47.301710 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:50:48 crc kubenswrapper[4884]: I1210 01:50:48.099029 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:50:48 crc kubenswrapper[4884]: I1210 01:50:48.099124 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:50:48 crc kubenswrapper[4884]: I1210 01:50:48.099193 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 01:50:48 crc kubenswrapper[4884]: I1210 01:50:48.100360 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"58a7edeffae4f0630f7567a47e1eb77a4163a79cf5c63ee8195427687620eb48"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 01:50:48 crc kubenswrapper[4884]: I1210 01:50:48.100501 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://58a7edeffae4f0630f7567a47e1eb77a4163a79cf5c63ee8195427687620eb48" gracePeriod=600 Dec 10 01:50:49 crc kubenswrapper[4884]: I1210 01:50:49.201863 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"58a7edeffae4f0630f7567a47e1eb77a4163a79cf5c63ee8195427687620eb48"} Dec 10 01:50:49 crc kubenswrapper[4884]: I1210 01:50:49.201836 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="58a7edeffae4f0630f7567a47e1eb77a4163a79cf5c63ee8195427687620eb48" exitCode=0 Dec 10 01:50:49 crc kubenswrapper[4884]: I1210 01:50:49.203392 4884 scope.go:117] "RemoveContainer" containerID="c9992d16739492d45f4ed033a47c8ca5303175ffcd4625082a9a31ed32fee9dc" Dec 10 01:50:49 crc kubenswrapper[4884]: I1210 01:50:49.203473 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea"} Dec 10 01:51:00 crc kubenswrapper[4884]: E1210 01:51:00.293729 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:51:02 crc kubenswrapper[4884]: I1210 01:51:02.290558 4884 
provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 01:51:02 crc kubenswrapper[4884]: E1210 01:51:02.430348 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:51:02 crc kubenswrapper[4884]: E1210 01:51:02.430468 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:51:02 crc kubenswrapper[4884]: E1210 01:51:02.430730 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in 
quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:51:02 crc kubenswrapper[4884]: E1210 01:51:02.432030 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:51:11 crc kubenswrapper[4884]: E1210 01:51:11.291046 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:51:16 crc kubenswrapper[4884]: E1210 01:51:16.293132 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:51:24 crc kubenswrapper[4884]: E1210 01:51:24.421810 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:51:24 crc kubenswrapper[4884]: E1210 01:51:24.423019 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:51:24 crc kubenswrapper[4884]: E1210 01:51:24.423247 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:51:24 crc kubenswrapper[4884]: E1210 01:51:24.424519 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:51:27 crc kubenswrapper[4884]: E1210 01:51:27.326819 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:51:38 crc kubenswrapper[4884]: E1210 01:51:38.291379 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:51:39 crc kubenswrapper[4884]: E1210 01:51:39.289583 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:51:51 crc kubenswrapper[4884]: E1210 01:51:51.289981 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:51:53 crc kubenswrapper[4884]: E1210 01:51:53.291052 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:52:04 crc kubenswrapper[4884]: E1210 01:52:04.292022 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:52:05 crc kubenswrapper[4884]: E1210 01:52:05.289010 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:52:16 crc kubenswrapper[4884]: E1210 01:52:16.291854 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:52:20 crc kubenswrapper[4884]: E1210 01:52:20.290939 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:52:27 crc kubenswrapper[4884]: E1210 01:52:27.305762 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:52:35 crc kubenswrapper[4884]: E1210 01:52:35.288988 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:52:38 crc kubenswrapper[4884]: E1210 01:52:38.291816 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:52:47 crc kubenswrapper[4884]: E1210 01:52:47.312036 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:52:48 crc kubenswrapper[4884]: I1210 01:52:48.098299 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:52:48 crc kubenswrapper[4884]: I1210 01:52:48.098357 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:52:49 crc kubenswrapper[4884]: E1210 01:52:49.293031 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:53:00 crc kubenswrapper[4884]: E1210 01:53:00.291590 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:53:01 crc kubenswrapper[4884]: E1210 01:53:01.293573 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.257000 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-d9255"] Dec 10 01:53:05 crc kubenswrapper[4884]: E1210 01:53:05.258088 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5af84785-e4e4-424b-886c-791385803a02" containerName="extract-content" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.258104 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5af84785-e4e4-424b-886c-791385803a02" containerName="extract-content" Dec 10 01:53:05 crc kubenswrapper[4884]: E1210 01:53:05.258159 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5af84785-e4e4-424b-886c-791385803a02" containerName="registry-server" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.258170 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5af84785-e4e4-424b-886c-791385803a02" containerName="registry-server" Dec 10 01:53:05 crc kubenswrapper[4884]: E1210 01:53:05.258184 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5af84785-e4e4-424b-886c-791385803a02" containerName="extract-utilities" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.258193 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="5af84785-e4e4-424b-886c-791385803a02" containerName="extract-utilities" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.258476 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="5af84785-e4e4-424b-886c-791385803a02" containerName="registry-server" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.260314 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.286202 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-d9255"] Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.349640 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qztlc\" (UniqueName: \"kubernetes.io/projected/b160d7cb-5669-462e-90b5-a81b223fb1d1-kube-api-access-qztlc\") pod \"certified-operators-d9255\" (UID: \"b160d7cb-5669-462e-90b5-a81b223fb1d1\") " pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.349884 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b160d7cb-5669-462e-90b5-a81b223fb1d1-catalog-content\") pod \"certified-operators-d9255\" (UID: \"b160d7cb-5669-462e-90b5-a81b223fb1d1\") " pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.350034 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b160d7cb-5669-462e-90b5-a81b223fb1d1-utilities\") pod \"certified-operators-d9255\" (UID: \"b160d7cb-5669-462e-90b5-a81b223fb1d1\") " pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.451970 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b160d7cb-5669-462e-90b5-a81b223fb1d1-catalog-content\") pod \"certified-operators-d9255\" (UID: \"b160d7cb-5669-462e-90b5-a81b223fb1d1\") " pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.452207 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b160d7cb-5669-462e-90b5-a81b223fb1d1-utilities\") pod \"certified-operators-d9255\" (UID: \"b160d7cb-5669-462e-90b5-a81b223fb1d1\") " pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.452395 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qztlc\" (UniqueName: \"kubernetes.io/projected/b160d7cb-5669-462e-90b5-a81b223fb1d1-kube-api-access-qztlc\") pod \"certified-operators-d9255\" (UID: \"b160d7cb-5669-462e-90b5-a81b223fb1d1\") " pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.452427 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b160d7cb-5669-462e-90b5-a81b223fb1d1-catalog-content\") pod \"certified-operators-d9255\" (UID: \"b160d7cb-5669-462e-90b5-a81b223fb1d1\") " pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.452551 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b160d7cb-5669-462e-90b5-a81b223fb1d1-utilities\") pod \"certified-operators-d9255\" (UID: \"b160d7cb-5669-462e-90b5-a81b223fb1d1\") " pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.506502 4884 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qztlc\" (UniqueName: \"kubernetes.io/projected/b160d7cb-5669-462e-90b5-a81b223fb1d1-kube-api-access-qztlc\") pod \"certified-operators-d9255\" (UID: \"b160d7cb-5669-462e-90b5-a81b223fb1d1\") " pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:05 crc kubenswrapper[4884]: I1210 01:53:05.603007 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:06 crc kubenswrapper[4884]: I1210 01:53:06.119365 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-d9255"] Dec 10 01:53:07 crc kubenswrapper[4884]: I1210 01:53:07.072940 4884 generic.go:334] "Generic (PLEG): container finished" podID="b160d7cb-5669-462e-90b5-a81b223fb1d1" containerID="9a84b8d17ca8ef3877cbfd9096faf27462f594b09462c776c9a3ffc69565158f" exitCode=0 Dec 10 01:53:07 crc kubenswrapper[4884]: I1210 01:53:07.073049 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d9255" event={"ID":"b160d7cb-5669-462e-90b5-a81b223fb1d1","Type":"ContainerDied","Data":"9a84b8d17ca8ef3877cbfd9096faf27462f594b09462c776c9a3ffc69565158f"} Dec 10 01:53:07 crc kubenswrapper[4884]: I1210 01:53:07.073498 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d9255" event={"ID":"b160d7cb-5669-462e-90b5-a81b223fb1d1","Type":"ContainerStarted","Data":"b6fc87aa07990c162f0c6afd0790dc8ae4158efc7f4bc0eb557e1b5c4b6e8b25"} Dec 10 01:53:08 crc kubenswrapper[4884]: I1210 01:53:08.089842 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d9255" event={"ID":"b160d7cb-5669-462e-90b5-a81b223fb1d1","Type":"ContainerStarted","Data":"bd19801a6eb3b31cafaa8cafffa11149990974a4f74fb037f98f3bc065ff4d8b"} Dec 10 01:53:09 crc kubenswrapper[4884]: I1210 01:53:09.104728 4884 generic.go:334] "Generic (PLEG): container finished" podID="b160d7cb-5669-462e-90b5-a81b223fb1d1" containerID="bd19801a6eb3b31cafaa8cafffa11149990974a4f74fb037f98f3bc065ff4d8b" exitCode=0 Dec 10 01:53:09 crc kubenswrapper[4884]: I1210 01:53:09.104853 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d9255" event={"ID":"b160d7cb-5669-462e-90b5-a81b223fb1d1","Type":"ContainerDied","Data":"bd19801a6eb3b31cafaa8cafffa11149990974a4f74fb037f98f3bc065ff4d8b"} Dec 10 01:53:10 crc kubenswrapper[4884]: I1210 01:53:10.121549 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d9255" event={"ID":"b160d7cb-5669-462e-90b5-a81b223fb1d1","Type":"ContainerStarted","Data":"a6b339cc951cf436de8f2448457993d2f1e88392f61c4c139996209e14ed400f"} Dec 10 01:53:10 crc kubenswrapper[4884]: I1210 01:53:10.140025 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-d9255" podStartSLOduration=2.606846771 podStartE2EDuration="5.140002185s" podCreationTimestamp="2025-12-10 01:53:05 +0000 UTC" firstStartedPulling="2025-12-10 01:53:07.075510559 +0000 UTC m=+4960.153467686" lastFinishedPulling="2025-12-10 01:53:09.608665963 +0000 UTC m=+4962.686623100" observedRunningTime="2025-12-10 01:53:10.138205146 +0000 UTC m=+4963.216162323" watchObservedRunningTime="2025-12-10 01:53:10.140002185 +0000 UTC m=+4963.217959342" Dec 10 01:53:12 crc kubenswrapper[4884]: E1210 01:53:12.291404 4884 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:53:13 crc kubenswrapper[4884]: E1210 01:53:13.290589 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:53:15 crc kubenswrapper[4884]: I1210 01:53:15.603130 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:15 crc kubenswrapper[4884]: I1210 01:53:15.603764 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:15 crc kubenswrapper[4884]: I1210 01:53:15.704962 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:16 crc kubenswrapper[4884]: I1210 01:53:16.346332 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:16 crc kubenswrapper[4884]: I1210 01:53:16.433714 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-d9255"] Dec 10 01:53:18 crc kubenswrapper[4884]: I1210 01:53:18.098094 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:53:18 crc kubenswrapper[4884]: I1210 01:53:18.098468 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:53:18 crc kubenswrapper[4884]: I1210 01:53:18.276902 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-d9255" podUID="b160d7cb-5669-462e-90b5-a81b223fb1d1" containerName="registry-server" containerID="cri-o://a6b339cc951cf436de8f2448457993d2f1e88392f61c4c139996209e14ed400f" gracePeriod=2 Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.311843 4884 generic.go:334] "Generic (PLEG): container finished" podID="b160d7cb-5669-462e-90b5-a81b223fb1d1" containerID="a6b339cc951cf436de8f2448457993d2f1e88392f61c4c139996209e14ed400f" exitCode=0 Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.316820 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d9255" event={"ID":"b160d7cb-5669-462e-90b5-a81b223fb1d1","Type":"ContainerDied","Data":"a6b339cc951cf436de8f2448457993d2f1e88392f61c4c139996209e14ed400f"} Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.316853 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-d9255" 
event={"ID":"b160d7cb-5669-462e-90b5-a81b223fb1d1","Type":"ContainerDied","Data":"b6fc87aa07990c162f0c6afd0790dc8ae4158efc7f4bc0eb557e1b5c4b6e8b25"} Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.316864 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6fc87aa07990c162f0c6afd0790dc8ae4158efc7f4bc0eb557e1b5c4b6e8b25" Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.391097 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.506105 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b160d7cb-5669-462e-90b5-a81b223fb1d1-catalog-content\") pod \"b160d7cb-5669-462e-90b5-a81b223fb1d1\" (UID: \"b160d7cb-5669-462e-90b5-a81b223fb1d1\") " Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.506210 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b160d7cb-5669-462e-90b5-a81b223fb1d1-utilities\") pod \"b160d7cb-5669-462e-90b5-a81b223fb1d1\" (UID: \"b160d7cb-5669-462e-90b5-a81b223fb1d1\") " Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.506278 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qztlc\" (UniqueName: \"kubernetes.io/projected/b160d7cb-5669-462e-90b5-a81b223fb1d1-kube-api-access-qztlc\") pod \"b160d7cb-5669-462e-90b5-a81b223fb1d1\" (UID: \"b160d7cb-5669-462e-90b5-a81b223fb1d1\") " Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.507460 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b160d7cb-5669-462e-90b5-a81b223fb1d1-utilities" (OuterVolumeSpecName: "utilities") pod "b160d7cb-5669-462e-90b5-a81b223fb1d1" (UID: "b160d7cb-5669-462e-90b5-a81b223fb1d1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.513521 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b160d7cb-5669-462e-90b5-a81b223fb1d1-kube-api-access-qztlc" (OuterVolumeSpecName: "kube-api-access-qztlc") pod "b160d7cb-5669-462e-90b5-a81b223fb1d1" (UID: "b160d7cb-5669-462e-90b5-a81b223fb1d1"). InnerVolumeSpecName "kube-api-access-qztlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.577256 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b160d7cb-5669-462e-90b5-a81b223fb1d1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b160d7cb-5669-462e-90b5-a81b223fb1d1" (UID: "b160d7cb-5669-462e-90b5-a81b223fb1d1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.609289 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b160d7cb-5669-462e-90b5-a81b223fb1d1-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.609328 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b160d7cb-5669-462e-90b5-a81b223fb1d1-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:53:19 crc kubenswrapper[4884]: I1210 01:53:19.609338 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qztlc\" (UniqueName: \"kubernetes.io/projected/b160d7cb-5669-462e-90b5-a81b223fb1d1-kube-api-access-qztlc\") on node \"crc\" DevicePath \"\"" Dec 10 01:53:20 crc kubenswrapper[4884]: I1210 01:53:20.323711 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-d9255" Dec 10 01:53:20 crc kubenswrapper[4884]: I1210 01:53:20.382012 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-d9255"] Dec 10 01:53:20 crc kubenswrapper[4884]: I1210 01:53:20.411517 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-d9255"] Dec 10 01:53:21 crc kubenswrapper[4884]: I1210 01:53:21.306425 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b160d7cb-5669-462e-90b5-a81b223fb1d1" path="/var/lib/kubelet/pods/b160d7cb-5669-462e-90b5-a81b223fb1d1/volumes" Dec 10 01:53:26 crc kubenswrapper[4884]: E1210 01:53:26.289429 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:53:27 crc kubenswrapper[4884]: E1210 01:53:27.316085 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:53:37 crc kubenswrapper[4884]: E1210 01:53:37.304290 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:53:40 crc kubenswrapper[4884]: E1210 01:53:40.290682 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:53:48 crc kubenswrapper[4884]: I1210 01:53:48.097738 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 01:53:48 crc kubenswrapper[4884]: I1210 01:53:48.098229 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 01:53:48 crc kubenswrapper[4884]: I1210 01:53:48.098550 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 01:53:48 crc kubenswrapper[4884]: I1210 01:53:48.099594 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 01:53:48 crc kubenswrapper[4884]: I1210 01:53:48.099736 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" gracePeriod=600 Dec 10 01:53:48 crc kubenswrapper[4884]: E1210 01:53:48.233383 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:53:48 crc kubenswrapper[4884]: I1210 01:53:48.762188 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" exitCode=0 Dec 10 01:53:48 crc kubenswrapper[4884]: I1210 01:53:48.762261 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea"} Dec 10 01:53:48 crc kubenswrapper[4884]: I1210 01:53:48.762735 4884 scope.go:117] "RemoveContainer" containerID="58a7edeffae4f0630f7567a47e1eb77a4163a79cf5c63ee8195427687620eb48" Dec 10 01:53:48 crc kubenswrapper[4884]: I1210 01:53:48.763631 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:53:48 crc kubenswrapper[4884]: E1210 01:53:48.764092 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:53:52 crc kubenswrapper[4884]: E1210 01:53:52.289367 4884 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:53:55 crc kubenswrapper[4884]: E1210 01:53:55.291560 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:54:03 crc kubenswrapper[4884]: I1210 01:54:03.287610 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:54:03 crc kubenswrapper[4884]: E1210 01:54:03.288824 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:54:03 crc kubenswrapper[4884]: E1210 01:54:03.290555 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:54:08 crc kubenswrapper[4884]: E1210 01:54:08.290727 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:54:14 crc kubenswrapper[4884]: I1210 01:54:14.287132 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:54:14 crc kubenswrapper[4884]: E1210 01:54:14.287882 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:54:15 crc kubenswrapper[4884]: E1210 01:54:15.293867 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:54:23 crc kubenswrapper[4884]: E1210 01:54:23.291919 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:54:29 crc kubenswrapper[4884]: I1210 01:54:29.288175 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:54:29 crc kubenswrapper[4884]: E1210 01:54:29.289410 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:54:30 crc kubenswrapper[4884]: E1210 01:54:30.291918 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:54:36 crc kubenswrapper[4884]: E1210 01:54:36.290389 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:54:41 crc kubenswrapper[4884]: E1210 01:54:41.289602 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:54:42 crc kubenswrapper[4884]: I1210 01:54:42.287404 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:54:42 crc kubenswrapper[4884]: E1210 01:54:42.288318 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:54:50 crc kubenswrapper[4884]: E1210 01:54:50.290048 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:54:56 crc kubenswrapper[4884]: I1210 01:54:56.288268 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:54:56 crc kubenswrapper[4884]: E1210 01:54:56.290940 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:54:56 crc kubenswrapper[4884]: E1210 01:54:56.292226 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:55:05 crc kubenswrapper[4884]: E1210 01:55:05.289728 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:55:07 crc kubenswrapper[4884]: I1210 01:55:07.299243 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:55:07 crc kubenswrapper[4884]: E1210 01:55:07.299947 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:55:10 crc kubenswrapper[4884]: E1210 01:55:10.289792 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:55:16 crc kubenswrapper[4884]: E1210 01:55:16.291361 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:55:21 crc kubenswrapper[4884]: I1210 01:55:21.289484 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:55:21 crc kubenswrapper[4884]: E1210 01:55:21.290557 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:55:24 crc kubenswrapper[4884]: E1210 01:55:24.291951 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" 
pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:55:29 crc kubenswrapper[4884]: E1210 01:55:29.291716 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:55:34 crc kubenswrapper[4884]: I1210 01:55:34.287727 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:55:34 crc kubenswrapper[4884]: E1210 01:55:34.288557 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:55:35 crc kubenswrapper[4884]: E1210 01:55:35.291263 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.176682 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mtwwq"] Dec 10 01:55:36 crc kubenswrapper[4884]: E1210 01:55:36.177747 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b160d7cb-5669-462e-90b5-a81b223fb1d1" containerName="extract-utilities" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.177850 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b160d7cb-5669-462e-90b5-a81b223fb1d1" containerName="extract-utilities" Dec 10 01:55:36 crc kubenswrapper[4884]: E1210 01:55:36.177943 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b160d7cb-5669-462e-90b5-a81b223fb1d1" containerName="extract-content" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.178017 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b160d7cb-5669-462e-90b5-a81b223fb1d1" containerName="extract-content" Dec 10 01:55:36 crc kubenswrapper[4884]: E1210 01:55:36.178094 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b160d7cb-5669-462e-90b5-a81b223fb1d1" containerName="registry-server" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.178170 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b160d7cb-5669-462e-90b5-a81b223fb1d1" containerName="registry-server" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.178786 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b160d7cb-5669-462e-90b5-a81b223fb1d1" containerName="registry-server" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.180713 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.215581 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mtwwq"] Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.333323 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjdmp\" (UniqueName: \"kubernetes.io/projected/030808a0-e54d-41f9-b305-22639e8239f7-kube-api-access-xjdmp\") pod \"redhat-operators-mtwwq\" (UID: \"030808a0-e54d-41f9-b305-22639e8239f7\") " pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.333452 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/030808a0-e54d-41f9-b305-22639e8239f7-catalog-content\") pod \"redhat-operators-mtwwq\" (UID: \"030808a0-e54d-41f9-b305-22639e8239f7\") " pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.333484 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/030808a0-e54d-41f9-b305-22639e8239f7-utilities\") pod \"redhat-operators-mtwwq\" (UID: \"030808a0-e54d-41f9-b305-22639e8239f7\") " pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.435047 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjdmp\" (UniqueName: \"kubernetes.io/projected/030808a0-e54d-41f9-b305-22639e8239f7-kube-api-access-xjdmp\") pod \"redhat-operators-mtwwq\" (UID: \"030808a0-e54d-41f9-b305-22639e8239f7\") " pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.435226 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/030808a0-e54d-41f9-b305-22639e8239f7-catalog-content\") pod \"redhat-operators-mtwwq\" (UID: \"030808a0-e54d-41f9-b305-22639e8239f7\") " pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.435288 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/030808a0-e54d-41f9-b305-22639e8239f7-utilities\") pod \"redhat-operators-mtwwq\" (UID: \"030808a0-e54d-41f9-b305-22639e8239f7\") " pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.436219 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/030808a0-e54d-41f9-b305-22639e8239f7-catalog-content\") pod \"redhat-operators-mtwwq\" (UID: \"030808a0-e54d-41f9-b305-22639e8239f7\") " pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.436328 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/030808a0-e54d-41f9-b305-22639e8239f7-utilities\") pod \"redhat-operators-mtwwq\" (UID: \"030808a0-e54d-41f9-b305-22639e8239f7\") " pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.464597 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xjdmp\" (UniqueName: \"kubernetes.io/projected/030808a0-e54d-41f9-b305-22639e8239f7-kube-api-access-xjdmp\") pod \"redhat-operators-mtwwq\" (UID: \"030808a0-e54d-41f9-b305-22639e8239f7\") " pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:36 crc kubenswrapper[4884]: I1210 01:55:36.538358 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:37 crc kubenswrapper[4884]: I1210 01:55:37.030273 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mtwwq"] Dec 10 01:55:37 crc kubenswrapper[4884]: I1210 01:55:37.188882 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtwwq" event={"ID":"030808a0-e54d-41f9-b305-22639e8239f7","Type":"ContainerStarted","Data":"e4f8c8499e1a977631bfa6f4dc169ef2fdb0fbebd7849111a969a7594a3174b7"} Dec 10 01:55:38 crc kubenswrapper[4884]: I1210 01:55:38.203350 4884 generic.go:334] "Generic (PLEG): container finished" podID="030808a0-e54d-41f9-b305-22639e8239f7" containerID="e9a55e3ed5f9ef5c9883c8b5ecfe0168aec55504d5513ec38f38cbbcdfcaadf6" exitCode=0 Dec 10 01:55:38 crc kubenswrapper[4884]: I1210 01:55:38.203425 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtwwq" event={"ID":"030808a0-e54d-41f9-b305-22639e8239f7","Type":"ContainerDied","Data":"e9a55e3ed5f9ef5c9883c8b5ecfe0168aec55504d5513ec38f38cbbcdfcaadf6"} Dec 10 01:55:40 crc kubenswrapper[4884]: I1210 01:55:40.237372 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtwwq" event={"ID":"030808a0-e54d-41f9-b305-22639e8239f7","Type":"ContainerStarted","Data":"3f49cf74a2475cf1e98e3d799a3747644f2330ad4cfcbf6adc5df8d46768c81d"} Dec 10 01:55:42 crc kubenswrapper[4884]: I1210 01:55:42.260991 4884 generic.go:334] "Generic (PLEG): container finished" podID="030808a0-e54d-41f9-b305-22639e8239f7" containerID="3f49cf74a2475cf1e98e3d799a3747644f2330ad4cfcbf6adc5df8d46768c81d" exitCode=0 Dec 10 01:55:42 crc kubenswrapper[4884]: I1210 01:55:42.261028 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtwwq" event={"ID":"030808a0-e54d-41f9-b305-22639e8239f7","Type":"ContainerDied","Data":"3f49cf74a2475cf1e98e3d799a3747644f2330ad4cfcbf6adc5df8d46768c81d"} Dec 10 01:55:42 crc kubenswrapper[4884]: E1210 01:55:42.290196 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:55:44 crc kubenswrapper[4884]: I1210 01:55:44.303254 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtwwq" event={"ID":"030808a0-e54d-41f9-b305-22639e8239f7","Type":"ContainerStarted","Data":"60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50"} Dec 10 01:55:44 crc kubenswrapper[4884]: I1210 01:55:44.332530 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mtwwq" podStartSLOduration=3.705644713 podStartE2EDuration="8.332506262s" podCreationTimestamp="2025-12-10 01:55:36 +0000 UTC" firstStartedPulling="2025-12-10 01:55:38.20622556 +0000 UTC m=+5111.284182677" 
lastFinishedPulling="2025-12-10 01:55:42.833087109 +0000 UTC m=+5115.911044226" observedRunningTime="2025-12-10 01:55:44.325135614 +0000 UTC m=+5117.403092761" watchObservedRunningTime="2025-12-10 01:55:44.332506262 +0000 UTC m=+5117.410463389" Dec 10 01:55:46 crc kubenswrapper[4884]: I1210 01:55:46.539117 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:46 crc kubenswrapper[4884]: I1210 01:55:46.539702 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:47 crc kubenswrapper[4884]: I1210 01:55:47.306819 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:55:47 crc kubenswrapper[4884]: E1210 01:55:47.307305 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:55:47 crc kubenswrapper[4884]: I1210 01:55:47.627414 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mtwwq" podUID="030808a0-e54d-41f9-b305-22639e8239f7" containerName="registry-server" probeResult="failure" output=< Dec 10 01:55:47 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 01:55:47 crc kubenswrapper[4884]: > Dec 10 01:55:49 crc kubenswrapper[4884]: E1210 01:55:49.289523 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:55:56 crc kubenswrapper[4884]: E1210 01:55:56.290973 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:55:56 crc kubenswrapper[4884]: I1210 01:55:56.605481 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:56 crc kubenswrapper[4884]: I1210 01:55:56.667020 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:56 crc kubenswrapper[4884]: I1210 01:55:56.847471 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mtwwq"] Dec 10 01:55:58 crc kubenswrapper[4884]: I1210 01:55:58.287651 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:55:58 crc kubenswrapper[4884]: E1210 01:55:58.288357 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:55:58 crc kubenswrapper[4884]: I1210 01:55:58.512129 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mtwwq" podUID="030808a0-e54d-41f9-b305-22639e8239f7" containerName="registry-server" containerID="cri-o://60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50" gracePeriod=2 Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.142356 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.235960 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/030808a0-e54d-41f9-b305-22639e8239f7-catalog-content\") pod \"030808a0-e54d-41f9-b305-22639e8239f7\" (UID: \"030808a0-e54d-41f9-b305-22639e8239f7\") " Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.236718 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xjdmp\" (UniqueName: \"kubernetes.io/projected/030808a0-e54d-41f9-b305-22639e8239f7-kube-api-access-xjdmp\") pod \"030808a0-e54d-41f9-b305-22639e8239f7\" (UID: \"030808a0-e54d-41f9-b305-22639e8239f7\") " Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.236959 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/030808a0-e54d-41f9-b305-22639e8239f7-utilities\") pod \"030808a0-e54d-41f9-b305-22639e8239f7\" (UID: \"030808a0-e54d-41f9-b305-22639e8239f7\") " Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.237604 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/030808a0-e54d-41f9-b305-22639e8239f7-utilities" (OuterVolumeSpecName: "utilities") pod "030808a0-e54d-41f9-b305-22639e8239f7" (UID: "030808a0-e54d-41f9-b305-22639e8239f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.237919 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/030808a0-e54d-41f9-b305-22639e8239f7-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.242842 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/030808a0-e54d-41f9-b305-22639e8239f7-kube-api-access-xjdmp" (OuterVolumeSpecName: "kube-api-access-xjdmp") pod "030808a0-e54d-41f9-b305-22639e8239f7" (UID: "030808a0-e54d-41f9-b305-22639e8239f7"). InnerVolumeSpecName "kube-api-access-xjdmp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.340368 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xjdmp\" (UniqueName: \"kubernetes.io/projected/030808a0-e54d-41f9-b305-22639e8239f7-kube-api-access-xjdmp\") on node \"crc\" DevicePath \"\"" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.354485 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/030808a0-e54d-41f9-b305-22639e8239f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "030808a0-e54d-41f9-b305-22639e8239f7" (UID: "030808a0-e54d-41f9-b305-22639e8239f7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.443072 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/030808a0-e54d-41f9-b305-22639e8239f7-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.526243 4884 generic.go:334] "Generic (PLEG): container finished" podID="030808a0-e54d-41f9-b305-22639e8239f7" containerID="60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50" exitCode=0 Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.526321 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtwwq" event={"ID":"030808a0-e54d-41f9-b305-22639e8239f7","Type":"ContainerDied","Data":"60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50"} Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.526399 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mtwwq" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.526425 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mtwwq" event={"ID":"030808a0-e54d-41f9-b305-22639e8239f7","Type":"ContainerDied","Data":"e4f8c8499e1a977631bfa6f4dc169ef2fdb0fbebd7849111a969a7594a3174b7"} Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.526470 4884 scope.go:117] "RemoveContainer" containerID="60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.564530 4884 scope.go:117] "RemoveContainer" containerID="3f49cf74a2475cf1e98e3d799a3747644f2330ad4cfcbf6adc5df8d46768c81d" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.587453 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mtwwq"] Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.605038 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mtwwq"] Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.609229 4884 scope.go:117] "RemoveContainer" containerID="e9a55e3ed5f9ef5c9883c8b5ecfe0168aec55504d5513ec38f38cbbcdfcaadf6" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.660132 4884 scope.go:117] "RemoveContainer" containerID="60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50" Dec 10 01:55:59 crc kubenswrapper[4884]: E1210 01:55:59.663187 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50\": container with ID starting with 60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50 
not found: ID does not exist" containerID="60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.663258 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50"} err="failed to get container status \"60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50\": rpc error: code = NotFound desc = could not find container \"60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50\": container with ID starting with 60c25853374ee25259f53e34036c9bdaf25797a8d762f7da9c837c79c6de4a50 not found: ID does not exist" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.663312 4884 scope.go:117] "RemoveContainer" containerID="3f49cf74a2475cf1e98e3d799a3747644f2330ad4cfcbf6adc5df8d46768c81d" Dec 10 01:55:59 crc kubenswrapper[4884]: E1210 01:55:59.663708 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f49cf74a2475cf1e98e3d799a3747644f2330ad4cfcbf6adc5df8d46768c81d\": container with ID starting with 3f49cf74a2475cf1e98e3d799a3747644f2330ad4cfcbf6adc5df8d46768c81d not found: ID does not exist" containerID="3f49cf74a2475cf1e98e3d799a3747644f2330ad4cfcbf6adc5df8d46768c81d" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.663746 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f49cf74a2475cf1e98e3d799a3747644f2330ad4cfcbf6adc5df8d46768c81d"} err="failed to get container status \"3f49cf74a2475cf1e98e3d799a3747644f2330ad4cfcbf6adc5df8d46768c81d\": rpc error: code = NotFound desc = could not find container \"3f49cf74a2475cf1e98e3d799a3747644f2330ad4cfcbf6adc5df8d46768c81d\": container with ID starting with 3f49cf74a2475cf1e98e3d799a3747644f2330ad4cfcbf6adc5df8d46768c81d not found: ID does not exist" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.663775 4884 scope.go:117] "RemoveContainer" containerID="e9a55e3ed5f9ef5c9883c8b5ecfe0168aec55504d5513ec38f38cbbcdfcaadf6" Dec 10 01:55:59 crc kubenswrapper[4884]: E1210 01:55:59.664273 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e9a55e3ed5f9ef5c9883c8b5ecfe0168aec55504d5513ec38f38cbbcdfcaadf6\": container with ID starting with e9a55e3ed5f9ef5c9883c8b5ecfe0168aec55504d5513ec38f38cbbcdfcaadf6 not found: ID does not exist" containerID="e9a55e3ed5f9ef5c9883c8b5ecfe0168aec55504d5513ec38f38cbbcdfcaadf6" Dec 10 01:55:59 crc kubenswrapper[4884]: I1210 01:55:59.664390 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9a55e3ed5f9ef5c9883c8b5ecfe0168aec55504d5513ec38f38cbbcdfcaadf6"} err="failed to get container status \"e9a55e3ed5f9ef5c9883c8b5ecfe0168aec55504d5513ec38f38cbbcdfcaadf6\": rpc error: code = NotFound desc = could not find container \"e9a55e3ed5f9ef5c9883c8b5ecfe0168aec55504d5513ec38f38cbbcdfcaadf6\": container with ID starting with e9a55e3ed5f9ef5c9883c8b5ecfe0168aec55504d5513ec38f38cbbcdfcaadf6 not found: ID does not exist" Dec 10 01:56:00 crc kubenswrapper[4884]: E1210 01:56:00.289590 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" 
podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:56:01 crc kubenswrapper[4884]: I1210 01:56:01.298006 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="030808a0-e54d-41f9-b305-22639e8239f7" path="/var/lib/kubelet/pods/030808a0-e54d-41f9-b305-22639e8239f7/volumes" Dec 10 01:56:08 crc kubenswrapper[4884]: E1210 01:56:08.291619 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:56:09 crc kubenswrapper[4884]: I1210 01:56:09.287916 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:56:09 crc kubenswrapper[4884]: E1210 01:56:09.288859 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:56:14 crc kubenswrapper[4884]: I1210 01:56:14.292543 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 01:56:14 crc kubenswrapper[4884]: E1210 01:56:14.424909 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:56:14 crc kubenswrapper[4884]: E1210 01:56:14.424970 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 01:56:14 crc kubenswrapper[4884]: E1210 01:56:14.425082 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:56:14 crc kubenswrapper[4884]: E1210 01:56:14.426287 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:56:20 crc kubenswrapper[4884]: I1210 01:56:20.288421 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:56:20 crc kubenswrapper[4884]: E1210 01:56:20.290661 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:56:21 crc kubenswrapper[4884]: E1210 01:56:21.291158 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:56:27 crc kubenswrapper[4884]: E1210 01:56:27.300459 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:56:34 crc kubenswrapper[4884]: I1210 01:56:34.287078 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:56:34 crc kubenswrapper[4884]: E1210 01:56:34.287769 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:56:36 crc kubenswrapper[4884]: E1210 01:56:36.425798 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:56:36 crc kubenswrapper[4884]: E1210 01:56:36.426205 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 01:56:36 crc kubenswrapper[4884]: E1210 01:56:36.426408 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 01:56:36 crc kubenswrapper[4884]: E1210 01:56:36.427726 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:56:38 crc kubenswrapper[4884]: E1210 01:56:38.290017 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:56:46 crc kubenswrapper[4884]: I1210 01:56:46.288975 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:56:46 crc kubenswrapper[4884]: E1210 01:56:46.290364 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:56:49 crc kubenswrapper[4884]: E1210 01:56:49.291403 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:56:51 crc kubenswrapper[4884]: E1210 01:56:51.292623 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:57:00 crc kubenswrapper[4884]: I1210 01:57:00.289313 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:57:00 crc kubenswrapper[4884]: E1210 01:57:00.290645 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:57:03 crc kubenswrapper[4884]: E1210 01:57:03.299953 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:57:03 crc kubenswrapper[4884]: E1210 01:57:03.300100 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:57:15 crc kubenswrapper[4884]: I1210 01:57:15.286926 4884 scope.go:117] 
"RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:57:15 crc kubenswrapper[4884]: E1210 01:57:15.287916 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:57:16 crc kubenswrapper[4884]: E1210 01:57:16.290200 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:57:17 crc kubenswrapper[4884]: E1210 01:57:17.309054 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:57:27 crc kubenswrapper[4884]: I1210 01:57:27.300950 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:57:27 crc kubenswrapper[4884]: E1210 01:57:27.302083 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:57:30 crc kubenswrapper[4884]: E1210 01:57:30.291085 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:57:31 crc kubenswrapper[4884]: E1210 01:57:31.290912 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:57:40 crc kubenswrapper[4884]: I1210 01:57:40.287427 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:57:40 crc kubenswrapper[4884]: E1210 01:57:40.288327 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 
01:57:41 crc kubenswrapper[4884]: E1210 01:57:41.290905 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:57:42 crc kubenswrapper[4884]: E1210 01:57:42.291070 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:57:52 crc kubenswrapper[4884]: E1210 01:57:52.290985 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:57:54 crc kubenswrapper[4884]: I1210 01:57:54.289405 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:57:54 crc kubenswrapper[4884]: E1210 01:57:54.290904 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:57:54 crc kubenswrapper[4884]: E1210 01:57:54.293748 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:58:04 crc kubenswrapper[4884]: E1210 01:58:04.292374 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:58:06 crc kubenswrapper[4884]: E1210 01:58:06.289135 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:58:09 crc kubenswrapper[4884]: I1210 01:58:09.289964 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:58:09 crc kubenswrapper[4884]: E1210 01:58:09.291040 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:58:19 crc kubenswrapper[4884]: E1210 01:58:19.290647 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:58:20 crc kubenswrapper[4884]: E1210 01:58:20.288902 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:58:24 crc kubenswrapper[4884]: I1210 01:58:24.287533 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:58:24 crc kubenswrapper[4884]: E1210 01:58:24.288608 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:58:31 crc kubenswrapper[4884]: E1210 01:58:31.289573 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:58:33 crc kubenswrapper[4884]: E1210 01:58:33.289013 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:58:36 crc kubenswrapper[4884]: I1210 01:58:36.288092 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:58:36 crc kubenswrapper[4884]: E1210 01:58:36.289125 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 01:58:46 crc kubenswrapper[4884]: E1210 01:58:46.290287 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" 
podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:58:46 crc kubenswrapper[4884]: I1210 01:58:46.786416 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lfnst"] Dec 10 01:58:46 crc kubenswrapper[4884]: E1210 01:58:46.786917 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="030808a0-e54d-41f9-b305-22639e8239f7" containerName="registry-server" Dec 10 01:58:46 crc kubenswrapper[4884]: I1210 01:58:46.786937 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="030808a0-e54d-41f9-b305-22639e8239f7" containerName="registry-server" Dec 10 01:58:46 crc kubenswrapper[4884]: E1210 01:58:46.786961 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="030808a0-e54d-41f9-b305-22639e8239f7" containerName="extract-content" Dec 10 01:58:46 crc kubenswrapper[4884]: I1210 01:58:46.786969 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="030808a0-e54d-41f9-b305-22639e8239f7" containerName="extract-content" Dec 10 01:58:46 crc kubenswrapper[4884]: E1210 01:58:46.786985 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="030808a0-e54d-41f9-b305-22639e8239f7" containerName="extract-utilities" Dec 10 01:58:46 crc kubenswrapper[4884]: I1210 01:58:46.786995 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="030808a0-e54d-41f9-b305-22639e8239f7" containerName="extract-utilities" Dec 10 01:58:46 crc kubenswrapper[4884]: I1210 01:58:46.787265 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="030808a0-e54d-41f9-b305-22639e8239f7" containerName="registry-server" Dec 10 01:58:46 crc kubenswrapper[4884]: I1210 01:58:46.789088 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:46 crc kubenswrapper[4884]: I1210 01:58:46.807455 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfnst"] Dec 10 01:58:46 crc kubenswrapper[4884]: I1210 01:58:46.937311 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-utilities\") pod \"redhat-marketplace-lfnst\" (UID: \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\") " pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:46 crc kubenswrapper[4884]: I1210 01:58:46.937379 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-catalog-content\") pod \"redhat-marketplace-lfnst\" (UID: \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\") " pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:46 crc kubenswrapper[4884]: I1210 01:58:46.937610 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b88j5\" (UniqueName: \"kubernetes.io/projected/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-kube-api-access-b88j5\") pod \"redhat-marketplace-lfnst\" (UID: \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\") " pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:47 crc kubenswrapper[4884]: I1210 01:58:47.041878 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b88j5\" (UniqueName: \"kubernetes.io/projected/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-kube-api-access-b88j5\") pod \"redhat-marketplace-lfnst\" (UID: 
\"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\") " pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:47 crc kubenswrapper[4884]: I1210 01:58:47.041979 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-utilities\") pod \"redhat-marketplace-lfnst\" (UID: \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\") " pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:47 crc kubenswrapper[4884]: I1210 01:58:47.042004 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-catalog-content\") pod \"redhat-marketplace-lfnst\" (UID: \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\") " pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:47 crc kubenswrapper[4884]: I1210 01:58:47.042517 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-catalog-content\") pod \"redhat-marketplace-lfnst\" (UID: \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\") " pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:47 crc kubenswrapper[4884]: I1210 01:58:47.043098 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-utilities\") pod \"redhat-marketplace-lfnst\" (UID: \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\") " pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:47 crc kubenswrapper[4884]: I1210 01:58:47.080685 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b88j5\" (UniqueName: \"kubernetes.io/projected/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-kube-api-access-b88j5\") pod \"redhat-marketplace-lfnst\" (UID: \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\") " pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:47 crc kubenswrapper[4884]: I1210 01:58:47.121608 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:47 crc kubenswrapper[4884]: E1210 01:58:47.301331 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:58:47 crc kubenswrapper[4884]: I1210 01:58:47.585543 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfnst"] Dec 10 01:58:48 crc kubenswrapper[4884]: I1210 01:58:48.193157 4884 generic.go:334] "Generic (PLEG): container finished" podID="bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" containerID="c410f8c3b9218554b1331bf99ba021ca11cb0bc923ce8f79c834a0a035eddfac" exitCode=0 Dec 10 01:58:48 crc kubenswrapper[4884]: I1210 01:58:48.193253 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfnst" event={"ID":"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a","Type":"ContainerDied","Data":"c410f8c3b9218554b1331bf99ba021ca11cb0bc923ce8f79c834a0a035eddfac"} Dec 10 01:58:48 crc kubenswrapper[4884]: I1210 01:58:48.193597 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfnst" event={"ID":"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a","Type":"ContainerStarted","Data":"1057d219737a68ea1ca5f895412a13eef6c54ba56f9b5563d945bef3b9ddb20d"} Dec 10 01:58:49 crc kubenswrapper[4884]: I1210 01:58:49.215108 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfnst" event={"ID":"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a","Type":"ContainerStarted","Data":"37b572529b300fd414b410c8621a71179df1dee78b7141da87e82686a076e01e"} Dec 10 01:58:49 crc kubenswrapper[4884]: I1210 01:58:49.287390 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 01:58:50 crc kubenswrapper[4884]: I1210 01:58:50.239198 4884 generic.go:334] "Generic (PLEG): container finished" podID="bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" containerID="37b572529b300fd414b410c8621a71179df1dee78b7141da87e82686a076e01e" exitCode=0 Dec 10 01:58:50 crc kubenswrapper[4884]: I1210 01:58:50.239767 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfnst" event={"ID":"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a","Type":"ContainerDied","Data":"37b572529b300fd414b410c8621a71179df1dee78b7141da87e82686a076e01e"} Dec 10 01:58:50 crc kubenswrapper[4884]: I1210 01:58:50.244649 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"e8f4eac03ccca0f0d31a1e5527a9ae76c203b8538de41b60f9d5802efc9a7452"} Dec 10 01:58:51 crc kubenswrapper[4884]: I1210 01:58:51.258171 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfnst" event={"ID":"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a","Type":"ContainerStarted","Data":"266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d"} Dec 10 01:58:51 crc kubenswrapper[4884]: I1210 01:58:51.280100 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lfnst" podStartSLOduration=2.514329658 podStartE2EDuration="5.280080181s" 
podCreationTimestamp="2025-12-10 01:58:46 +0000 UTC" firstStartedPulling="2025-12-10 01:58:48.195294459 +0000 UTC m=+5301.273251586" lastFinishedPulling="2025-12-10 01:58:50.961044982 +0000 UTC m=+5304.039002109" observedRunningTime="2025-12-10 01:58:51.277259285 +0000 UTC m=+5304.355216422" watchObservedRunningTime="2025-12-10 01:58:51.280080181 +0000 UTC m=+5304.358037308" Dec 10 01:58:57 crc kubenswrapper[4884]: I1210 01:58:57.123581 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:57 crc kubenswrapper[4884]: I1210 01:58:57.124142 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:57 crc kubenswrapper[4884]: I1210 01:58:57.179226 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:57 crc kubenswrapper[4884]: I1210 01:58:57.388998 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:57 crc kubenswrapper[4884]: I1210 01:58:57.443457 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfnst"] Dec 10 01:58:59 crc kubenswrapper[4884]: E1210 01:58:59.289775 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:58:59 crc kubenswrapper[4884]: I1210 01:58:59.364279 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lfnst" podUID="bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" containerName="registry-server" containerID="cri-o://266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d" gracePeriod=2 Dec 10 01:58:59 crc kubenswrapper[4884]: I1210 01:58:59.915909 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:58:59 crc kubenswrapper[4884]: I1210 01:58:59.954587 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-catalog-content\") pod \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\" (UID: \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\") " Dec 10 01:58:59 crc kubenswrapper[4884]: I1210 01:58:59.968647 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b88j5\" (UniqueName: \"kubernetes.io/projected/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-kube-api-access-b88j5\") pod \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\" (UID: \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\") " Dec 10 01:58:59 crc kubenswrapper[4884]: I1210 01:58:59.969047 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-utilities\") pod \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\" (UID: \"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a\") " Dec 10 01:58:59 crc kubenswrapper[4884]: I1210 01:58:59.969947 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-utilities" (OuterVolumeSpecName: "utilities") pod "bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" (UID: "bff3bb7a-ba7d-4905-9a69-b44f35d33d7a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:58:59 crc kubenswrapper[4884]: I1210 01:58:59.970315 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 01:58:59 crc kubenswrapper[4884]: I1210 01:58:59.982192 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" (UID: "bff3bb7a-ba7d-4905-9a69-b44f35d33d7a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 01:58:59 crc kubenswrapper[4884]: I1210 01:58:59.983576 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-kube-api-access-b88j5" (OuterVolumeSpecName: "kube-api-access-b88j5") pod "bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" (UID: "bff3bb7a-ba7d-4905-9a69-b44f35d33d7a"). InnerVolumeSpecName "kube-api-access-b88j5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.072798 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.073146 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b88j5\" (UniqueName: \"kubernetes.io/projected/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a-kube-api-access-b88j5\") on node \"crc\" DevicePath \"\"" Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.374721 4884 generic.go:334] "Generic (PLEG): container finished" podID="bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" containerID="266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d" exitCode=0 Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.375964 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfnst" event={"ID":"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a","Type":"ContainerDied","Data":"266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d"} Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.376066 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfnst" event={"ID":"bff3bb7a-ba7d-4905-9a69-b44f35d33d7a","Type":"ContainerDied","Data":"1057d219737a68ea1ca5f895412a13eef6c54ba56f9b5563d945bef3b9ddb20d"} Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.376149 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lfnst" Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.376092 4884 scope.go:117] "RemoveContainer" containerID="266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d" Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.410314 4884 scope.go:117] "RemoveContainer" containerID="37b572529b300fd414b410c8621a71179df1dee78b7141da87e82686a076e01e" Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.423983 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfnst"] Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.435964 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfnst"] Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.437494 4884 scope.go:117] "RemoveContainer" containerID="c410f8c3b9218554b1331bf99ba021ca11cb0bc923ce8f79c834a0a035eddfac" Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.506842 4884 scope.go:117] "RemoveContainer" containerID="266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d" Dec 10 01:59:00 crc kubenswrapper[4884]: E1210 01:59:00.507333 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d\": container with ID starting with 266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d not found: ID does not exist" containerID="266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d" Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.507394 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d"} err="failed to get container status 
\"266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d\": rpc error: code = NotFound desc = could not find container \"266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d\": container with ID starting with 266028d6534a6fadaddf626c97f20887186821cad7c73ca028820434adcf5a7d not found: ID does not exist" Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.507426 4884 scope.go:117] "RemoveContainer" containerID="37b572529b300fd414b410c8621a71179df1dee78b7141da87e82686a076e01e" Dec 10 01:59:00 crc kubenswrapper[4884]: E1210 01:59:00.507939 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37b572529b300fd414b410c8621a71179df1dee78b7141da87e82686a076e01e\": container with ID starting with 37b572529b300fd414b410c8621a71179df1dee78b7141da87e82686a076e01e not found: ID does not exist" containerID="37b572529b300fd414b410c8621a71179df1dee78b7141da87e82686a076e01e" Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.507982 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37b572529b300fd414b410c8621a71179df1dee78b7141da87e82686a076e01e"} err="failed to get container status \"37b572529b300fd414b410c8621a71179df1dee78b7141da87e82686a076e01e\": rpc error: code = NotFound desc = could not find container \"37b572529b300fd414b410c8621a71179df1dee78b7141da87e82686a076e01e\": container with ID starting with 37b572529b300fd414b410c8621a71179df1dee78b7141da87e82686a076e01e not found: ID does not exist" Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.508011 4884 scope.go:117] "RemoveContainer" containerID="c410f8c3b9218554b1331bf99ba021ca11cb0bc923ce8f79c834a0a035eddfac" Dec 10 01:59:00 crc kubenswrapper[4884]: E1210 01:59:00.508750 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c410f8c3b9218554b1331bf99ba021ca11cb0bc923ce8f79c834a0a035eddfac\": container with ID starting with c410f8c3b9218554b1331bf99ba021ca11cb0bc923ce8f79c834a0a035eddfac not found: ID does not exist" containerID="c410f8c3b9218554b1331bf99ba021ca11cb0bc923ce8f79c834a0a035eddfac" Dec 10 01:59:00 crc kubenswrapper[4884]: I1210 01:59:00.508793 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c410f8c3b9218554b1331bf99ba021ca11cb0bc923ce8f79c834a0a035eddfac"} err="failed to get container status \"c410f8c3b9218554b1331bf99ba021ca11cb0bc923ce8f79c834a0a035eddfac\": rpc error: code = NotFound desc = could not find container \"c410f8c3b9218554b1331bf99ba021ca11cb0bc923ce8f79c834a0a035eddfac\": container with ID starting with c410f8c3b9218554b1331bf99ba021ca11cb0bc923ce8f79c834a0a035eddfac not found: ID does not exist" Dec 10 01:59:01 crc kubenswrapper[4884]: E1210 01:59:01.288604 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:59:01 crc kubenswrapper[4884]: I1210 01:59:01.298382 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" path="/var/lib/kubelet/pods/bff3bb7a-ba7d-4905-9a69-b44f35d33d7a/volumes" Dec 10 01:59:14 crc kubenswrapper[4884]: E1210 01:59:14.291576 4884 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:59:15 crc kubenswrapper[4884]: E1210 01:59:15.290879 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:59:28 crc kubenswrapper[4884]: E1210 01:59:28.289711 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:59:29 crc kubenswrapper[4884]: E1210 01:59:29.289248 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:59:42 crc kubenswrapper[4884]: E1210 01:59:42.290676 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:59:43 crc kubenswrapper[4884]: E1210 01:59:43.290573 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:59:43 crc kubenswrapper[4884]: I1210 01:59:43.852563 4884 scope.go:117] "RemoveContainer" containerID="bd19801a6eb3b31cafaa8cafffa11149990974a4f74fb037f98f3bc065ff4d8b" Dec 10 01:59:44 crc kubenswrapper[4884]: I1210 01:59:44.269124 4884 scope.go:117] "RemoveContainer" containerID="a6b339cc951cf436de8f2448457993d2f1e88392f61c4c139996209e14ed400f" Dec 10 01:59:44 crc kubenswrapper[4884]: I1210 01:59:44.302616 4884 scope.go:117] "RemoveContainer" containerID="9a84b8d17ca8ef3877cbfd9096faf27462f594b09462c776c9a3ffc69565158f" Dec 10 01:59:55 crc kubenswrapper[4884]: E1210 01:59:55.290646 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 01:59:56 crc kubenswrapper[4884]: E1210 01:59:56.289080 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.455261 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-5p69z/must-gather-kgx8d"] Dec 10 01:59:56 crc kubenswrapper[4884]: E1210 01:59:56.455782 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" containerName="registry-server" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.455796 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" containerName="registry-server" Dec 10 01:59:56 crc kubenswrapper[4884]: E1210 01:59:56.455818 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" containerName="extract-content" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.455825 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" containerName="extract-content" Dec 10 01:59:56 crc kubenswrapper[4884]: E1210 01:59:56.455842 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" containerName="extract-utilities" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.455851 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" containerName="extract-utilities" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.456042 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="bff3bb7a-ba7d-4905-9a69-b44f35d33d7a" containerName="registry-server" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.457147 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-5p69z/must-gather-kgx8d" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.459388 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-5p69z"/"openshift-service-ca.crt" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.459393 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-5p69z"/"kube-root-ca.crt" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.459733 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-5p69z"/"default-dockercfg-wsng2" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.480146 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-5p69z/must-gather-kgx8d"] Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.573624 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9gz5\" (UniqueName: \"kubernetes.io/projected/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e-kube-api-access-b9gz5\") pod \"must-gather-kgx8d\" (UID: \"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e\") " pod="openshift-must-gather-5p69z/must-gather-kgx8d" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.573986 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e-must-gather-output\") pod \"must-gather-kgx8d\" (UID: \"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e\") " pod="openshift-must-gather-5p69z/must-gather-kgx8d" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.676262 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e-must-gather-output\") pod \"must-gather-kgx8d\" (UID: \"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e\") " pod="openshift-must-gather-5p69z/must-gather-kgx8d" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.676688 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9gz5\" (UniqueName: \"kubernetes.io/projected/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e-kube-api-access-b9gz5\") pod \"must-gather-kgx8d\" (UID: \"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e\") " pod="openshift-must-gather-5p69z/must-gather-kgx8d" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.676765 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e-must-gather-output\") pod \"must-gather-kgx8d\" (UID: \"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e\") " pod="openshift-must-gather-5p69z/must-gather-kgx8d" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.700101 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9gz5\" (UniqueName: \"kubernetes.io/projected/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e-kube-api-access-b9gz5\") pod \"must-gather-kgx8d\" (UID: \"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e\") " pod="openshift-must-gather-5p69z/must-gather-kgx8d" Dec 10 01:59:56 crc kubenswrapper[4884]: I1210 01:59:56.779061 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-5p69z/must-gather-kgx8d" Dec 10 01:59:57 crc kubenswrapper[4884]: I1210 01:59:57.327147 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-5p69z/must-gather-kgx8d"] Dec 10 01:59:58 crc kubenswrapper[4884]: I1210 01:59:58.157406 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5p69z/must-gather-kgx8d" event={"ID":"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e","Type":"ContainerStarted","Data":"b6f8f6cd3ca7469e5a9aa254933492eb89db85975c0f6d13b58328928ab92674"} Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.145643 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5"] Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.148497 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.150667 4884 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.151545 4884 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.154959 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5"] Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.261277 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/828f98a4-6a70-43f6-b48c-8f3d1de1436f-config-volume\") pod \"collect-profiles-29422200-74jm5\" (UID: \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.261672 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfbj2\" (UniqueName: \"kubernetes.io/projected/828f98a4-6a70-43f6-b48c-8f3d1de1436f-kube-api-access-zfbj2\") pod \"collect-profiles-29422200-74jm5\" (UID: \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.261866 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/828f98a4-6a70-43f6-b48c-8f3d1de1436f-secret-volume\") pod \"collect-profiles-29422200-74jm5\" (UID: \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.363735 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/828f98a4-6a70-43f6-b48c-8f3d1de1436f-config-volume\") pod \"collect-profiles-29422200-74jm5\" (UID: \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.364008 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfbj2\" (UniqueName: 
\"kubernetes.io/projected/828f98a4-6a70-43f6-b48c-8f3d1de1436f-kube-api-access-zfbj2\") pod \"collect-profiles-29422200-74jm5\" (UID: \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.364083 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/828f98a4-6a70-43f6-b48c-8f3d1de1436f-secret-volume\") pod \"collect-profiles-29422200-74jm5\" (UID: \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.364559 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/828f98a4-6a70-43f6-b48c-8f3d1de1436f-config-volume\") pod \"collect-profiles-29422200-74jm5\" (UID: \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.370635 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/828f98a4-6a70-43f6-b48c-8f3d1de1436f-secret-volume\") pod \"collect-profiles-29422200-74jm5\" (UID: \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.396202 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfbj2\" (UniqueName: \"kubernetes.io/projected/828f98a4-6a70-43f6-b48c-8f3d1de1436f-kube-api-access-zfbj2\") pod \"collect-profiles-29422200-74jm5\" (UID: \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:00 crc kubenswrapper[4884]: I1210 02:00:00.480271 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:05 crc kubenswrapper[4884]: I1210 02:00:05.240910 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5p69z/must-gather-kgx8d" event={"ID":"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e","Type":"ContainerStarted","Data":"46f05ebeabcb55bbaefc282df0564fb9c3b45a8903ed8181e18c14af7153072c"} Dec 10 02:00:05 crc kubenswrapper[4884]: I1210 02:00:05.278016 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5"] Dec 10 02:00:06 crc kubenswrapper[4884]: I1210 02:00:06.256390 4884 generic.go:334] "Generic (PLEG): container finished" podID="828f98a4-6a70-43f6-b48c-8f3d1de1436f" containerID="103b6f29e80de75f04cb184849a5d91d75811f0beadd4a86c75d51ef401905f7" exitCode=0 Dec 10 02:00:06 crc kubenswrapper[4884]: I1210 02:00:06.256786 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" event={"ID":"828f98a4-6a70-43f6-b48c-8f3d1de1436f","Type":"ContainerDied","Data":"103b6f29e80de75f04cb184849a5d91d75811f0beadd4a86c75d51ef401905f7"} Dec 10 02:00:06 crc kubenswrapper[4884]: I1210 02:00:06.256894 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" event={"ID":"828f98a4-6a70-43f6-b48c-8f3d1de1436f","Type":"ContainerStarted","Data":"798aedc0b3757e29abb503e913c31824d26a192d1bb417bbd07a95ca12f5442c"} Dec 10 02:00:06 crc kubenswrapper[4884]: I1210 02:00:06.260853 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5p69z/must-gather-kgx8d" event={"ID":"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e","Type":"ContainerStarted","Data":"ab72a066be90c2693124223d0a94733371e0d0c2adcf865cf72287bc33721dc1"} Dec 10 02:00:06 crc kubenswrapper[4884]: I1210 02:00:06.322475 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-5p69z/must-gather-kgx8d" podStartSLOduration=2.84595749 podStartE2EDuration="10.322451173s" podCreationTimestamp="2025-12-10 01:59:56 +0000 UTC" firstStartedPulling="2025-12-10 01:59:57.338116181 +0000 UTC m=+5370.416073308" lastFinishedPulling="2025-12-10 02:00:04.814609874 +0000 UTC m=+5377.892566991" observedRunningTime="2025-12-10 02:00:06.304777489 +0000 UTC m=+5379.382734606" watchObservedRunningTime="2025-12-10 02:00:06.322451173 +0000 UTC m=+5379.400408300" Dec 10 02:00:07 crc kubenswrapper[4884]: I1210 02:00:07.701937 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:07 crc kubenswrapper[4884]: I1210 02:00:07.829689 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/828f98a4-6a70-43f6-b48c-8f3d1de1436f-config-volume\") pod \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\" (UID: \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\") " Dec 10 02:00:07 crc kubenswrapper[4884]: I1210 02:00:07.829849 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/828f98a4-6a70-43f6-b48c-8f3d1de1436f-secret-volume\") pod \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\" (UID: \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\") " Dec 10 02:00:07 crc kubenswrapper[4884]: I1210 02:00:07.829972 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfbj2\" (UniqueName: \"kubernetes.io/projected/828f98a4-6a70-43f6-b48c-8f3d1de1436f-kube-api-access-zfbj2\") pod \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\" (UID: \"828f98a4-6a70-43f6-b48c-8f3d1de1436f\") " Dec 10 02:00:07 crc kubenswrapper[4884]: I1210 02:00:07.830337 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/828f98a4-6a70-43f6-b48c-8f3d1de1436f-config-volume" (OuterVolumeSpecName: "config-volume") pod "828f98a4-6a70-43f6-b48c-8f3d1de1436f" (UID: "828f98a4-6a70-43f6-b48c-8f3d1de1436f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 10 02:00:07 crc kubenswrapper[4884]: I1210 02:00:07.830425 4884 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/828f98a4-6a70-43f6-b48c-8f3d1de1436f-config-volume\") on node \"crc\" DevicePath \"\"" Dec 10 02:00:08 crc kubenswrapper[4884]: I1210 02:00:08.169885 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/828f98a4-6a70-43f6-b48c-8f3d1de1436f-kube-api-access-zfbj2" (OuterVolumeSpecName: "kube-api-access-zfbj2") pod "828f98a4-6a70-43f6-b48c-8f3d1de1436f" (UID: "828f98a4-6a70-43f6-b48c-8f3d1de1436f"). InnerVolumeSpecName "kube-api-access-zfbj2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 02:00:08 crc kubenswrapper[4884]: I1210 02:00:08.175740 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/828f98a4-6a70-43f6-b48c-8f3d1de1436f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "828f98a4-6a70-43f6-b48c-8f3d1de1436f" (UID: "828f98a4-6a70-43f6-b48c-8f3d1de1436f"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 02:00:08 crc kubenswrapper[4884]: I1210 02:00:08.239880 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfbj2\" (UniqueName: \"kubernetes.io/projected/828f98a4-6a70-43f6-b48c-8f3d1de1436f-kube-api-access-zfbj2\") on node \"crc\" DevicePath \"\"" Dec 10 02:00:08 crc kubenswrapper[4884]: I1210 02:00:08.239958 4884 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/828f98a4-6a70-43f6-b48c-8f3d1de1436f-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 10 02:00:08 crc kubenswrapper[4884]: I1210 02:00:08.286681 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" event={"ID":"828f98a4-6a70-43f6-b48c-8f3d1de1436f","Type":"ContainerDied","Data":"798aedc0b3757e29abb503e913c31824d26a192d1bb417bbd07a95ca12f5442c"} Dec 10 02:00:08 crc kubenswrapper[4884]: I1210 02:00:08.286734 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="798aedc0b3757e29abb503e913c31824d26a192d1bb417bbd07a95ca12f5442c" Dec 10 02:00:08 crc kubenswrapper[4884]: I1210 02:00:08.286771 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29422200-74jm5" Dec 10 02:00:08 crc kubenswrapper[4884]: I1210 02:00:08.795493 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk"] Dec 10 02:00:08 crc kubenswrapper[4884]: I1210 02:00:08.802273 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29422155-pt2vk"] Dec 10 02:00:09 crc kubenswrapper[4884]: E1210 02:00:09.289380 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:00:09 crc kubenswrapper[4884]: I1210 02:00:09.298692 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21296a82-a64c-4080-8048-c3e408ac6ffd" path="/var/lib/kubelet/pods/21296a82-a64c-4080-8048-c3e408ac6ffd/volumes" Dec 10 02:00:09 crc kubenswrapper[4884]: I1210 02:00:09.678464 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-5p69z/crc-debug-t2dxs"] Dec 10 02:00:09 crc kubenswrapper[4884]: E1210 02:00:09.679667 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="828f98a4-6a70-43f6-b48c-8f3d1de1436f" containerName="collect-profiles" Dec 10 02:00:09 crc kubenswrapper[4884]: I1210 02:00:09.679745 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="828f98a4-6a70-43f6-b48c-8f3d1de1436f" containerName="collect-profiles" Dec 10 02:00:09 crc kubenswrapper[4884]: I1210 02:00:09.680215 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="828f98a4-6a70-43f6-b48c-8f3d1de1436f" containerName="collect-profiles" Dec 10 02:00:09 crc kubenswrapper[4884]: I1210 02:00:09.681474 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-5p69z/crc-debug-t2dxs" Dec 10 02:00:09 crc kubenswrapper[4884]: I1210 02:00:09.784081 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b7238a6e-783e-4b92-887a-0746edf2a352-host\") pod \"crc-debug-t2dxs\" (UID: \"b7238a6e-783e-4b92-887a-0746edf2a352\") " pod="openshift-must-gather-5p69z/crc-debug-t2dxs" Dec 10 02:00:09 crc kubenswrapper[4884]: I1210 02:00:09.784287 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxxx9\" (UniqueName: \"kubernetes.io/projected/b7238a6e-783e-4b92-887a-0746edf2a352-kube-api-access-zxxx9\") pod \"crc-debug-t2dxs\" (UID: \"b7238a6e-783e-4b92-887a-0746edf2a352\") " pod="openshift-must-gather-5p69z/crc-debug-t2dxs" Dec 10 02:00:09 crc kubenswrapper[4884]: I1210 02:00:09.886069 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b7238a6e-783e-4b92-887a-0746edf2a352-host\") pod \"crc-debug-t2dxs\" (UID: \"b7238a6e-783e-4b92-887a-0746edf2a352\") " pod="openshift-must-gather-5p69z/crc-debug-t2dxs" Dec 10 02:00:09 crc kubenswrapper[4884]: I1210 02:00:09.886185 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b7238a6e-783e-4b92-887a-0746edf2a352-host\") pod \"crc-debug-t2dxs\" (UID: \"b7238a6e-783e-4b92-887a-0746edf2a352\") " pod="openshift-must-gather-5p69z/crc-debug-t2dxs" Dec 10 02:00:09 crc kubenswrapper[4884]: I1210 02:00:09.886292 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxxx9\" (UniqueName: \"kubernetes.io/projected/b7238a6e-783e-4b92-887a-0746edf2a352-kube-api-access-zxxx9\") pod \"crc-debug-t2dxs\" (UID: \"b7238a6e-783e-4b92-887a-0746edf2a352\") " pod="openshift-must-gather-5p69z/crc-debug-t2dxs" Dec 10 02:00:10 crc kubenswrapper[4884]: E1210 02:00:10.289945 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:00:10 crc kubenswrapper[4884]: I1210 02:00:10.471896 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxxx9\" (UniqueName: \"kubernetes.io/projected/b7238a6e-783e-4b92-887a-0746edf2a352-kube-api-access-zxxx9\") pod \"crc-debug-t2dxs\" (UID: \"b7238a6e-783e-4b92-887a-0746edf2a352\") " pod="openshift-must-gather-5p69z/crc-debug-t2dxs" Dec 10 02:00:10 crc kubenswrapper[4884]: I1210 02:00:10.602582 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-5p69z/crc-debug-t2dxs" Dec 10 02:00:11 crc kubenswrapper[4884]: I1210 02:00:11.321157 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5p69z/crc-debug-t2dxs" event={"ID":"b7238a6e-783e-4b92-887a-0746edf2a352","Type":"ContainerStarted","Data":"23893aab75e6f707de1eb37ee9339e583e141b3c537c0b04dff17d7ed70b5a07"} Dec 10 02:00:21 crc kubenswrapper[4884]: I1210 02:00:21.449752 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5p69z/crc-debug-t2dxs" event={"ID":"b7238a6e-783e-4b92-887a-0746edf2a352","Type":"ContainerStarted","Data":"c8d259b5cf83d4fe23cf47d12b55735c4fd7d0c4dd1855063c98e60ba2d5941d"} Dec 10 02:00:21 crc kubenswrapper[4884]: I1210 02:00:21.465538 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-5p69z/crc-debug-t2dxs" podStartSLOduration=2.194252544 podStartE2EDuration="12.465517458s" podCreationTimestamp="2025-12-10 02:00:09 +0000 UTC" firstStartedPulling="2025-12-10 02:00:10.660783713 +0000 UTC m=+5383.738740850" lastFinishedPulling="2025-12-10 02:00:20.932048647 +0000 UTC m=+5394.010005764" observedRunningTime="2025-12-10 02:00:21.461317825 +0000 UTC m=+5394.539274942" watchObservedRunningTime="2025-12-10 02:00:21.465517458 +0000 UTC m=+5394.543474575" Dec 10 02:00:23 crc kubenswrapper[4884]: E1210 02:00:23.290169 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:00:24 crc kubenswrapper[4884]: E1210 02:00:24.289794 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:00:35 crc kubenswrapper[4884]: E1210 02:00:35.290797 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:00:35 crc kubenswrapper[4884]: E1210 02:00:35.291074 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:00:44 crc kubenswrapper[4884]: I1210 02:00:44.381701 4884 scope.go:117] "RemoveContainer" containerID="4cfc306f0ae70b8d884eee9ac8d50ecc2e0bdb72a46a3369a3231aa2662dd063" Dec 10 02:00:44 crc kubenswrapper[4884]: I1210 02:00:44.719160 4884 generic.go:334] "Generic (PLEG): container finished" podID="b7238a6e-783e-4b92-887a-0746edf2a352" containerID="c8d259b5cf83d4fe23cf47d12b55735c4fd7d0c4dd1855063c98e60ba2d5941d" exitCode=0 Dec 10 02:00:44 crc kubenswrapper[4884]: I1210 02:00:44.719230 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-must-gather-5p69z/crc-debug-t2dxs" event={"ID":"b7238a6e-783e-4b92-887a-0746edf2a352","Type":"ContainerDied","Data":"c8d259b5cf83d4fe23cf47d12b55735c4fd7d0c4dd1855063c98e60ba2d5941d"} Dec 10 02:00:45 crc kubenswrapper[4884]: I1210 02:00:45.837058 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5p69z/crc-debug-t2dxs" Dec 10 02:00:45 crc kubenswrapper[4884]: I1210 02:00:45.872593 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-5p69z/crc-debug-t2dxs"] Dec 10 02:00:45 crc kubenswrapper[4884]: I1210 02:00:45.886302 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-5p69z/crc-debug-t2dxs"] Dec 10 02:00:45 crc kubenswrapper[4884]: I1210 02:00:45.972587 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxxx9\" (UniqueName: \"kubernetes.io/projected/b7238a6e-783e-4b92-887a-0746edf2a352-kube-api-access-zxxx9\") pod \"b7238a6e-783e-4b92-887a-0746edf2a352\" (UID: \"b7238a6e-783e-4b92-887a-0746edf2a352\") " Dec 10 02:00:45 crc kubenswrapper[4884]: I1210 02:00:45.973136 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b7238a6e-783e-4b92-887a-0746edf2a352-host\") pod \"b7238a6e-783e-4b92-887a-0746edf2a352\" (UID: \"b7238a6e-783e-4b92-887a-0746edf2a352\") " Dec 10 02:00:45 crc kubenswrapper[4884]: I1210 02:00:45.973208 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b7238a6e-783e-4b92-887a-0746edf2a352-host" (OuterVolumeSpecName: "host") pod "b7238a6e-783e-4b92-887a-0746edf2a352" (UID: "b7238a6e-783e-4b92-887a-0746edf2a352"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 02:00:45 crc kubenswrapper[4884]: I1210 02:00:45.973938 4884 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b7238a6e-783e-4b92-887a-0746edf2a352-host\") on node \"crc\" DevicePath \"\"" Dec 10 02:00:46 crc kubenswrapper[4884]: I1210 02:00:46.050774 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7238a6e-783e-4b92-887a-0746edf2a352-kube-api-access-zxxx9" (OuterVolumeSpecName: "kube-api-access-zxxx9") pod "b7238a6e-783e-4b92-887a-0746edf2a352" (UID: "b7238a6e-783e-4b92-887a-0746edf2a352"). InnerVolumeSpecName "kube-api-access-zxxx9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 02:00:46 crc kubenswrapper[4884]: I1210 02:00:46.076487 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxxx9\" (UniqueName: \"kubernetes.io/projected/b7238a6e-783e-4b92-887a-0746edf2a352-kube-api-access-zxxx9\") on node \"crc\" DevicePath \"\"" Dec 10 02:00:46 crc kubenswrapper[4884]: I1210 02:00:46.769732 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23893aab75e6f707de1eb37ee9339e583e141b3c537c0b04dff17d7ed70b5a07" Dec 10 02:00:46 crc kubenswrapper[4884]: I1210 02:00:46.769996 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-5p69z/crc-debug-t2dxs" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.072900 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-5p69z/crc-debug-j4stb"] Dec 10 02:00:47 crc kubenswrapper[4884]: E1210 02:00:47.073347 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7238a6e-783e-4b92-887a-0746edf2a352" containerName="container-00" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.073362 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7238a6e-783e-4b92-887a-0746edf2a352" containerName="container-00" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.073579 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7238a6e-783e-4b92-887a-0746edf2a352" containerName="container-00" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.074272 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5p69z/crc-debug-j4stb" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.202647 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/07aa430b-2786-4743-b87c-bc702c87675c-host\") pod \"crc-debug-j4stb\" (UID: \"07aa430b-2786-4743-b87c-bc702c87675c\") " pod="openshift-must-gather-5p69z/crc-debug-j4stb" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.203140 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxht6\" (UniqueName: \"kubernetes.io/projected/07aa430b-2786-4743-b87c-bc702c87675c-kube-api-access-qxht6\") pod \"crc-debug-j4stb\" (UID: \"07aa430b-2786-4743-b87c-bc702c87675c\") " pod="openshift-must-gather-5p69z/crc-debug-j4stb" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.305599 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/07aa430b-2786-4743-b87c-bc702c87675c-host\") pod \"crc-debug-j4stb\" (UID: \"07aa430b-2786-4743-b87c-bc702c87675c\") " pod="openshift-must-gather-5p69z/crc-debug-j4stb" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.305754 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/07aa430b-2786-4743-b87c-bc702c87675c-host\") pod \"crc-debug-j4stb\" (UID: \"07aa430b-2786-4743-b87c-bc702c87675c\") " pod="openshift-must-gather-5p69z/crc-debug-j4stb" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.306395 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxht6\" (UniqueName: \"kubernetes.io/projected/07aa430b-2786-4743-b87c-bc702c87675c-kube-api-access-qxht6\") pod \"crc-debug-j4stb\" (UID: \"07aa430b-2786-4743-b87c-bc702c87675c\") " pod="openshift-must-gather-5p69z/crc-debug-j4stb" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.310988 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7238a6e-783e-4b92-887a-0746edf2a352" path="/var/lib/kubelet/pods/b7238a6e-783e-4b92-887a-0746edf2a352/volumes" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.334323 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxht6\" (UniqueName: \"kubernetes.io/projected/07aa430b-2786-4743-b87c-bc702c87675c-kube-api-access-qxht6\") pod \"crc-debug-j4stb\" (UID: \"07aa430b-2786-4743-b87c-bc702c87675c\") " 
pod="openshift-must-gather-5p69z/crc-debug-j4stb" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.395325 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5p69z/crc-debug-j4stb" Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.784149 4884 generic.go:334] "Generic (PLEG): container finished" podID="07aa430b-2786-4743-b87c-bc702c87675c" containerID="bdddf711936138f2eed1833ca4c167e87b5664f8c49377cbb2eece287cf08628" exitCode=1 Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.784255 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5p69z/crc-debug-j4stb" event={"ID":"07aa430b-2786-4743-b87c-bc702c87675c","Type":"ContainerDied","Data":"bdddf711936138f2eed1833ca4c167e87b5664f8c49377cbb2eece287cf08628"} Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.784473 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5p69z/crc-debug-j4stb" event={"ID":"07aa430b-2786-4743-b87c-bc702c87675c","Type":"ContainerStarted","Data":"96bb104c0b49bc9adde507c2dd49bf1d8caeef7273b7eaa3ebaafeeda9c59f79"} Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.821001 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-5p69z/crc-debug-j4stb"] Dec 10 02:00:47 crc kubenswrapper[4884]: I1210 02:00:47.829623 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-5p69z/crc-debug-j4stb"] Dec 10 02:00:49 crc kubenswrapper[4884]: E1210 02:00:49.290023 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:00:49 crc kubenswrapper[4884]: I1210 02:00:49.298205 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5p69z/crc-debug-j4stb" Dec 10 02:00:49 crc kubenswrapper[4884]: I1210 02:00:49.454110 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/07aa430b-2786-4743-b87c-bc702c87675c-host\") pod \"07aa430b-2786-4743-b87c-bc702c87675c\" (UID: \"07aa430b-2786-4743-b87c-bc702c87675c\") " Dec 10 02:00:49 crc kubenswrapper[4884]: I1210 02:00:49.454248 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/07aa430b-2786-4743-b87c-bc702c87675c-host" (OuterVolumeSpecName: "host") pod "07aa430b-2786-4743-b87c-bc702c87675c" (UID: "07aa430b-2786-4743-b87c-bc702c87675c"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 10 02:00:49 crc kubenswrapper[4884]: I1210 02:00:49.454287 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxht6\" (UniqueName: \"kubernetes.io/projected/07aa430b-2786-4743-b87c-bc702c87675c-kube-api-access-qxht6\") pod \"07aa430b-2786-4743-b87c-bc702c87675c\" (UID: \"07aa430b-2786-4743-b87c-bc702c87675c\") " Dec 10 02:00:49 crc kubenswrapper[4884]: I1210 02:00:49.454861 4884 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/07aa430b-2786-4743-b87c-bc702c87675c-host\") on node \"crc\" DevicePath \"\"" Dec 10 02:00:49 crc kubenswrapper[4884]: I1210 02:00:49.460657 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07aa430b-2786-4743-b87c-bc702c87675c-kube-api-access-qxht6" (OuterVolumeSpecName: "kube-api-access-qxht6") pod "07aa430b-2786-4743-b87c-bc702c87675c" (UID: "07aa430b-2786-4743-b87c-bc702c87675c"). InnerVolumeSpecName "kube-api-access-qxht6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 02:00:49 crc kubenswrapper[4884]: I1210 02:00:49.556513 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxht6\" (UniqueName: \"kubernetes.io/projected/07aa430b-2786-4743-b87c-bc702c87675c-kube-api-access-qxht6\") on node \"crc\" DevicePath \"\"" Dec 10 02:00:49 crc kubenswrapper[4884]: I1210 02:00:49.809183 4884 scope.go:117] "RemoveContainer" containerID="bdddf711936138f2eed1833ca4c167e87b5664f8c49377cbb2eece287cf08628" Dec 10 02:00:49 crc kubenswrapper[4884]: I1210 02:00:49.809321 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5p69z/crc-debug-j4stb" Dec 10 02:00:50 crc kubenswrapper[4884]: E1210 02:00:50.287988 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:00:51 crc kubenswrapper[4884]: I1210 02:00:51.300002 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07aa430b-2786-4743-b87c-bc702c87675c" path="/var/lib/kubelet/pods/07aa430b-2786-4743-b87c-bc702c87675c/volumes" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.154986 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29422201-mbv85"] Dec 10 02:01:00 crc kubenswrapper[4884]: E1210 02:01:00.156355 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07aa430b-2786-4743-b87c-bc702c87675c" containerName="container-00" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.156381 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="07aa430b-2786-4743-b87c-bc702c87675c" containerName="container-00" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.156831 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="07aa430b-2786-4743-b87c-bc702c87675c" containerName="container-00" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.157988 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.166116 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29422201-mbv85"] Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.278836 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-combined-ca-bundle\") pod \"keystone-cron-29422201-mbv85\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.278977 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-config-data\") pod \"keystone-cron-29422201-mbv85\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.279039 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-fernet-keys\") pod \"keystone-cron-29422201-mbv85\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.279082 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4s5t\" (UniqueName: \"kubernetes.io/projected/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-kube-api-access-s4s5t\") pod \"keystone-cron-29422201-mbv85\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.380776 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-config-data\") pod \"keystone-cron-29422201-mbv85\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.380865 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-fernet-keys\") pod \"keystone-cron-29422201-mbv85\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.380905 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4s5t\" (UniqueName: \"kubernetes.io/projected/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-kube-api-access-s4s5t\") pod \"keystone-cron-29422201-mbv85\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.380987 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-combined-ca-bundle\") pod \"keystone-cron-29422201-mbv85\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.389415 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-combined-ca-bundle\") pod \"keystone-cron-29422201-mbv85\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.389509 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-config-data\") pod \"keystone-cron-29422201-mbv85\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.389873 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-fernet-keys\") pod \"keystone-cron-29422201-mbv85\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.403850 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4s5t\" (UniqueName: \"kubernetes.io/projected/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-kube-api-access-s4s5t\") pod \"keystone-cron-29422201-mbv85\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:00 crc kubenswrapper[4884]: I1210 02:01:00.479600 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:01 crc kubenswrapper[4884]: I1210 02:01:01.019366 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29422201-mbv85"] Dec 10 02:01:01 crc kubenswrapper[4884]: W1210 02:01:01.282714 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d568dea_c34e_4115_9fc5_dd1e0fd8aa05.slice/crio-94960b1a1d8a84bbae695177bdad08822bb1fc8dda6c02f0e902289a49f901ce WatchSource:0}: Error finding container 94960b1a1d8a84bbae695177bdad08822bb1fc8dda6c02f0e902289a49f901ce: Status 404 returned error can't find the container with id 94960b1a1d8a84bbae695177bdad08822bb1fc8dda6c02f0e902289a49f901ce Dec 10 02:01:01 crc kubenswrapper[4884]: I1210 02:01:01.937150 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422201-mbv85" event={"ID":"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05","Type":"ContainerStarted","Data":"6f71b67f59559c04031032c88139c9a107da9c282e5202d452102473a1b49ed5"} Dec 10 02:01:01 crc kubenswrapper[4884]: I1210 02:01:01.937611 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422201-mbv85" event={"ID":"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05","Type":"ContainerStarted","Data":"94960b1a1d8a84bbae695177bdad08822bb1fc8dda6c02f0e902289a49f901ce"} Dec 10 02:01:01 crc kubenswrapper[4884]: I1210 02:01:01.959851 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29422201-mbv85" podStartSLOduration=1.959822451 podStartE2EDuration="1.959822451s" podCreationTimestamp="2025-12-10 02:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-10 02:01:01.955508134 +0000 UTC m=+5435.033465311" watchObservedRunningTime="2025-12-10 02:01:01.959822451 +0000 UTC m=+5435.037779608" Dec 10 02:01:03 crc kubenswrapper[4884]: E1210 02:01:03.290710 4884 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:01:03 crc kubenswrapper[4884]: I1210 02:01:03.964029 4884 generic.go:334] "Generic (PLEG): container finished" podID="8d568dea-c34e-4115-9fc5-dd1e0fd8aa05" containerID="6f71b67f59559c04031032c88139c9a107da9c282e5202d452102473a1b49ed5" exitCode=0 Dec 10 02:01:03 crc kubenswrapper[4884]: I1210 02:01:03.964082 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422201-mbv85" event={"ID":"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05","Type":"ContainerDied","Data":"6f71b67f59559c04031032c88139c9a107da9c282e5202d452102473a1b49ed5"} Dec 10 02:01:04 crc kubenswrapper[4884]: E1210 02:01:04.290331 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.409283 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.507115 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-fernet-keys\") pod \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.507478 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-config-data\") pod \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.507556 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4s5t\" (UniqueName: \"kubernetes.io/projected/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-kube-api-access-s4s5t\") pod \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.507692 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-combined-ca-bundle\") pod \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\" (UID: \"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05\") " Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.513888 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "8d568dea-c34e-4115-9fc5-dd1e0fd8aa05" (UID: "8d568dea-c34e-4115-9fc5-dd1e0fd8aa05"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.515884 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-kube-api-access-s4s5t" (OuterVolumeSpecName: "kube-api-access-s4s5t") pod "8d568dea-c34e-4115-9fc5-dd1e0fd8aa05" (UID: "8d568dea-c34e-4115-9fc5-dd1e0fd8aa05"). InnerVolumeSpecName "kube-api-access-s4s5t". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.538183 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8d568dea-c34e-4115-9fc5-dd1e0fd8aa05" (UID: "8d568dea-c34e-4115-9fc5-dd1e0fd8aa05"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.585107 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-config-data" (OuterVolumeSpecName: "config-data") pod "8d568dea-c34e-4115-9fc5-dd1e0fd8aa05" (UID: "8d568dea-c34e-4115-9fc5-dd1e0fd8aa05"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.610392 4884 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.610425 4884 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.610448 4884 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-config-data\") on node \"crc\" DevicePath \"\"" Dec 10 02:01:05 crc kubenswrapper[4884]: I1210 02:01:05.610459 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4s5t\" (UniqueName: \"kubernetes.io/projected/8d568dea-c34e-4115-9fc5-dd1e0fd8aa05-kube-api-access-s4s5t\") on node \"crc\" DevicePath \"\"" Dec 10 02:01:06 crc kubenswrapper[4884]: I1210 02:01:06.008177 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29422201-mbv85" event={"ID":"8d568dea-c34e-4115-9fc5-dd1e0fd8aa05","Type":"ContainerDied","Data":"94960b1a1d8a84bbae695177bdad08822bb1fc8dda6c02f0e902289a49f901ce"} Dec 10 02:01:06 crc kubenswrapper[4884]: I1210 02:01:06.008236 4884 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94960b1a1d8a84bbae695177bdad08822bb1fc8dda6c02f0e902289a49f901ce" Dec 10 02:01:06 crc kubenswrapper[4884]: I1210 02:01:06.008315 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29422201-mbv85" Dec 10 02:01:15 crc kubenswrapper[4884]: I1210 02:01:15.289994 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 02:01:15 crc kubenswrapper[4884]: E1210 02:01:15.397547 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 02:01:15 crc kubenswrapper[4884]: E1210 02:01:15.397605 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 02:01:15 crc kubenswrapper[4884]: E1210 02:01:15.397712 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source 
docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 02:01:15 crc kubenswrapper[4884]: E1210 02:01:15.398975 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:01:18 crc kubenswrapper[4884]: I1210 02:01:18.098603 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 02:01:18 crc kubenswrapper[4884]: I1210 02:01:18.099178 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 02:01:18 crc kubenswrapper[4884]: E1210 02:01:18.291081 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:01:30 crc kubenswrapper[4884]: E1210 02:01:30.289373 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:01:30 crc kubenswrapper[4884]: E1210 02:01:30.289823 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.404931 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jg5gn"] Dec 10 02:01:39 crc kubenswrapper[4884]: E1210 02:01:39.406042 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d568dea-c34e-4115-9fc5-dd1e0fd8aa05" containerName="keystone-cron" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.406059 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d568dea-c34e-4115-9fc5-dd1e0fd8aa05" containerName="keystone-cron" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.406392 4884 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="8d568dea-c34e-4115-9fc5-dd1e0fd8aa05" containerName="keystone-cron" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.424319 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.477895 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jg5gn"] Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.508910 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a301df2a-d84a-4216-b904-8691cab27c3e-catalog-content\") pod \"community-operators-jg5gn\" (UID: \"a301df2a-d84a-4216-b904-8691cab27c3e\") " pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.509260 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a301df2a-d84a-4216-b904-8691cab27c3e-utilities\") pod \"community-operators-jg5gn\" (UID: \"a301df2a-d84a-4216-b904-8691cab27c3e\") " pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.509455 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkqjs\" (UniqueName: \"kubernetes.io/projected/a301df2a-d84a-4216-b904-8691cab27c3e-kube-api-access-zkqjs\") pod \"community-operators-jg5gn\" (UID: \"a301df2a-d84a-4216-b904-8691cab27c3e\") " pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.610872 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a301df2a-d84a-4216-b904-8691cab27c3e-catalog-content\") pod \"community-operators-jg5gn\" (UID: \"a301df2a-d84a-4216-b904-8691cab27c3e\") " pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.610965 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a301df2a-d84a-4216-b904-8691cab27c3e-utilities\") pod \"community-operators-jg5gn\" (UID: \"a301df2a-d84a-4216-b904-8691cab27c3e\") " pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.611066 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkqjs\" (UniqueName: \"kubernetes.io/projected/a301df2a-d84a-4216-b904-8691cab27c3e-kube-api-access-zkqjs\") pod \"community-operators-jg5gn\" (UID: \"a301df2a-d84a-4216-b904-8691cab27c3e\") " pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.611846 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a301df2a-d84a-4216-b904-8691cab27c3e-catalog-content\") pod \"community-operators-jg5gn\" (UID: \"a301df2a-d84a-4216-b904-8691cab27c3e\") " pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.611862 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a301df2a-d84a-4216-b904-8691cab27c3e-utilities\") pod \"community-operators-jg5gn\" (UID: 
\"a301df2a-d84a-4216-b904-8691cab27c3e\") " pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.635735 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkqjs\" (UniqueName: \"kubernetes.io/projected/a301df2a-d84a-4216-b904-8691cab27c3e-kube-api-access-zkqjs\") pod \"community-operators-jg5gn\" (UID: \"a301df2a-d84a-4216-b904-8691cab27c3e\") " pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:39 crc kubenswrapper[4884]: I1210 02:01:39.775970 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:40 crc kubenswrapper[4884]: I1210 02:01:40.289877 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jg5gn"] Dec 10 02:01:40 crc kubenswrapper[4884]: W1210 02:01:40.294233 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda301df2a_d84a_4216_b904_8691cab27c3e.slice/crio-20a26f4f705d24d5bed2712bbf3df8dcf86510cf30ede92932990ec7fbcc77ad WatchSource:0}: Error finding container 20a26f4f705d24d5bed2712bbf3df8dcf86510cf30ede92932990ec7fbcc77ad: Status 404 returned error can't find the container with id 20a26f4f705d24d5bed2712bbf3df8dcf86510cf30ede92932990ec7fbcc77ad Dec 10 02:01:40 crc kubenswrapper[4884]: I1210 02:01:40.493403 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jg5gn" event={"ID":"a301df2a-d84a-4216-b904-8691cab27c3e","Type":"ContainerStarted","Data":"20a26f4f705d24d5bed2712bbf3df8dcf86510cf30ede92932990ec7fbcc77ad"} Dec 10 02:01:41 crc kubenswrapper[4884]: E1210 02:01:41.289330 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:01:41 crc kubenswrapper[4884]: I1210 02:01:41.503604 4884 generic.go:334] "Generic (PLEG): container finished" podID="a301df2a-d84a-4216-b904-8691cab27c3e" containerID="9dce431a91d1025b4e87d14b08074e3f16aba2f70620fac2d853c28da2e11511" exitCode=0 Dec 10 02:01:41 crc kubenswrapper[4884]: I1210 02:01:41.503654 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jg5gn" event={"ID":"a301df2a-d84a-4216-b904-8691cab27c3e","Type":"ContainerDied","Data":"9dce431a91d1025b4e87d14b08074e3f16aba2f70620fac2d853c28da2e11511"} Dec 10 02:01:42 crc kubenswrapper[4884]: I1210 02:01:42.503002 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_81aa3793-6c68-4ffc-b413-293b268d2ccd/aodh-api/0.log" Dec 10 02:01:42 crc kubenswrapper[4884]: I1210 02:01:42.694282 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_81aa3793-6c68-4ffc-b413-293b268d2ccd/aodh-listener/0.log" Dec 10 02:01:42 crc kubenswrapper[4884]: I1210 02:01:42.728725 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_81aa3793-6c68-4ffc-b413-293b268d2ccd/aodh-evaluator/0.log" Dec 10 02:01:42 crc kubenswrapper[4884]: I1210 02:01:42.741331 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_81aa3793-6c68-4ffc-b413-293b268d2ccd/aodh-notifier/0.log" Dec 10 02:01:42 crc kubenswrapper[4884]: I1210 
02:01:42.875077 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7844c4d96d-4v75g_fd39a161-40d7-4f62-825a-9f022cd6d32e/barbican-api/0.log" Dec 10 02:01:42 crc kubenswrapper[4884]: I1210 02:01:42.924557 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7844c4d96d-4v75g_fd39a161-40d7-4f62-825a-9f022cd6d32e/barbican-api-log/0.log" Dec 10 02:01:43 crc kubenswrapper[4884]: I1210 02:01:43.024257 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-67b785fd9b-fkshr_339b9f5b-e372-4d2c-b939-3dabc25eef48/barbican-keystone-listener/0.log" Dec 10 02:01:43 crc kubenswrapper[4884]: I1210 02:01:43.301811 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-67b785fd9b-fkshr_339b9f5b-e372-4d2c-b939-3dabc25eef48/barbican-keystone-listener-log/0.log" Dec 10 02:01:43 crc kubenswrapper[4884]: I1210 02:01:43.380730 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7fffddc689-blxpq_c921edd1-2c65-40c6-b946-df2ce3a6ed64/barbican-worker-log/0.log" Dec 10 02:01:43 crc kubenswrapper[4884]: I1210 02:01:43.381899 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7fffddc689-blxpq_c921edd1-2c65-40c6-b946-df2ce3a6ed64/barbican-worker/0.log" Dec 10 02:01:43 crc kubenswrapper[4884]: E1210 02:01:43.382403 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 02:01:43 crc kubenswrapper[4884]: E1210 02:01:43.382456 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 02:01:43 crc kubenswrapper[4884]: E1210 02:01:43.382570 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 02:01:43 crc kubenswrapper[4884]: E1210 02:01:43.383911 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:01:43 crc kubenswrapper[4884]: I1210 02:01:43.559708 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-59pmb_1a04a509-b2c8-4fd6-8443-24aa317b7eb8/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:43 crc kubenswrapper[4884]: I1210 02:01:43.764278 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3ca9531f-74d6-4baa-aca5-f734f006210b/ceilometer-notification-agent/0.log" Dec 10 02:01:43 crc kubenswrapper[4884]: I1210 02:01:43.810978 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3ca9531f-74d6-4baa-aca5-f734f006210b/proxy-httpd/0.log" Dec 10 02:01:43 crc kubenswrapper[4884]: I1210 02:01:43.841708 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3ca9531f-74d6-4baa-aca5-f734f006210b/sg-core/0.log" Dec 10 02:01:43 crc kubenswrapper[4884]: I1210 02:01:43.956281 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-hci-pre-edpm-deployment-openstack-edpm-ipam-dqlzm_27cdea1b-8993-44d6-80f9-a4f46413a746/ceph-hci-pre-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:44 crc kubenswrapper[4884]: I1210 02:01:44.120063 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_ad703bf4-2568-4b7d-939d-cacee6ded9b3/cinder-api-log/0.log" Dec 10 02:01:44 crc kubenswrapper[4884]: I1210 02:01:44.146663 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_ad703bf4-2568-4b7d-939d-cacee6ded9b3/cinder-api/0.log" Dec 10 02:01:44 crc kubenswrapper[4884]: I1210 02:01:44.346160 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_ab33c12a-0d2e-4af7-a3a6-4069372a49a6/cinder-scheduler/0.log" Dec 10 02:01:44 crc kubenswrapper[4884]: I1210 02:01:44.397756 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_ab33c12a-0d2e-4af7-a3a6-4069372a49a6/probe/0.log" Dec 10 02:01:44 crc kubenswrapper[4884]: I1210 02:01:44.531692 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jg5gn" event={"ID":"a301df2a-d84a-4216-b904-8691cab27c3e","Type":"ContainerStarted","Data":"96a637ef91040b78451e4baac323f5c368953516de9985503cfd5c64ce040c6d"} Dec 10 02:01:44 crc kubenswrapper[4884]: I1210 02:01:44.942723 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5cf7b6cbf7-7jxlk_2ee52c8c-feab-476d-844c-19e007cf6e40/init/0.log" Dec 10 02:01:44 crc kubenswrapper[4884]: I1210 02:01:44.963730 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-cz9ql_ebb9744c-67b7-4ffb-8b42-88feca31263f/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:44 crc kubenswrapper[4884]: I1210 02:01:44.966522 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-qr8cx_e732d264-88f6-46db-9eff-c7fb0b13e791/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:45 crc kubenswrapper[4884]: I1210 02:01:45.255943 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5cf7b6cbf7-7jxlk_2ee52c8c-feab-476d-844c-19e007cf6e40/dnsmasq-dns/0.log" Dec 10 02:01:45 crc kubenswrapper[4884]: I1210 02:01:45.350817 4884 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5cf7b6cbf7-7jxlk_2ee52c8c-feab-476d-844c-19e007cf6e40/init/0.log" Dec 10 02:01:45 crc kubenswrapper[4884]: I1210 02:01:45.546816 4884 generic.go:334] "Generic (PLEG): container finished" podID="a301df2a-d84a-4216-b904-8691cab27c3e" containerID="96a637ef91040b78451e4baac323f5c368953516de9985503cfd5c64ce040c6d" exitCode=0 Dec 10 02:01:45 crc kubenswrapper[4884]: I1210 02:01:45.547157 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jg5gn" event={"ID":"a301df2a-d84a-4216-b904-8691cab27c3e","Type":"ContainerDied","Data":"96a637ef91040b78451e4baac323f5c368953516de9985503cfd5c64ce040c6d"} Dec 10 02:01:45 crc kubenswrapper[4884]: I1210 02:01:45.943941 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-c847d79b6-kwhvk_d5143d7b-5804-4dc4-b1a5-7e6ffe2568e9/heat-api/0.log" Dec 10 02:01:46 crc kubenswrapper[4884]: I1210 02:01:46.052668 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-56f5c7d86c-hthhq_644adf64-d8e7-40ff-8075-2f9595faa559/heat-cfnapi/0.log" Dec 10 02:01:46 crc kubenswrapper[4884]: I1210 02:01:46.236577 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-6c876d64f-4c6rh_c15434d2-166c-4919-ac7f-99ef8bf909b4/heat-engine/0.log" Dec 10 02:01:46 crc kubenswrapper[4884]: I1210 02:01:46.520186 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-swpbf_d2403845-7f66-49c3-8f0b-fd8bad801b5a/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:46 crc kubenswrapper[4884]: I1210 02:01:46.819615 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29422141-fmvp7_7c8db774-d53a-4a89-87fa-9863b4b73e5a/keystone-cron/0.log" Dec 10 02:01:46 crc kubenswrapper[4884]: I1210 02:01:46.832027 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-6fmlx_47f1f2ff-140b-4a3c-b810-f25f60bf466f/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:46 crc kubenswrapper[4884]: I1210 02:01:46.908077 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-78b6b775cc-bbrl7_c10e9f86-3844-4b7e-954f-a2f868c86b35/keystone-api/0.log" Dec 10 02:01:46 crc kubenswrapper[4884]: I1210 02:01:46.971159 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29422201-mbv85_8d568dea-c34e-4115-9fc5-dd1e0fd8aa05/keystone-cron/0.log" Dec 10 02:01:47 crc kubenswrapper[4884]: I1210 02:01:47.078721 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_a68f803e-f637-4336-af9c-04110737be37/kube-state-metrics/0.log" Dec 10 02:01:47 crc kubenswrapper[4884]: I1210 02:01:47.154902 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-9gmbm_93f31477-32e4-4873-95f9-43327b02f0c8/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:47 crc kubenswrapper[4884]: I1210 02:01:47.405272 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mysqld-exporter-0_c230f916-e118-40c7-9696-ee437fc34468/mysqld-exporter/0.log" Dec 10 02:01:47 crc kubenswrapper[4884]: I1210 02:01:47.571651 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7656cd6689-46nqx_f5135453-5688-4022-b536-cdd8c5e62926/neutron-api/0.log" Dec 10 
02:01:47 crc kubenswrapper[4884]: I1210 02:01:47.572695 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jg5gn" event={"ID":"a301df2a-d84a-4216-b904-8691cab27c3e","Type":"ContainerStarted","Data":"f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80"} Dec 10 02:01:47 crc kubenswrapper[4884]: I1210 02:01:47.599413 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jg5gn" podStartSLOduration=3.191443946 podStartE2EDuration="8.599393858s" podCreationTimestamp="2025-12-10 02:01:39 +0000 UTC" firstStartedPulling="2025-12-10 02:01:41.505238558 +0000 UTC m=+5474.583195675" lastFinishedPulling="2025-12-10 02:01:46.91318847 +0000 UTC m=+5479.991145587" observedRunningTime="2025-12-10 02:01:47.590846079 +0000 UTC m=+5480.668803206" watchObservedRunningTime="2025-12-10 02:01:47.599393858 +0000 UTC m=+5480.677350975" Dec 10 02:01:47 crc kubenswrapper[4884]: I1210 02:01:47.620728 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7656cd6689-46nqx_f5135453-5688-4022-b536-cdd8c5e62926/neutron-httpd/0.log" Dec 10 02:01:48 crc kubenswrapper[4884]: I1210 02:01:48.093207 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_63998bed-5250-4dc9-a161-3d9ceaf54e5c/nova-cell0-conductor-conductor/0.log" Dec 10 02:01:48 crc kubenswrapper[4884]: I1210 02:01:48.098204 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 02:01:48 crc kubenswrapper[4884]: I1210 02:01:48.098271 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 02:01:48 crc kubenswrapper[4884]: I1210 02:01:48.123891 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_33298def-a273-4c2a-bc65-32dc50928a1a/nova-api-log/0.log" Dec 10 02:01:48 crc kubenswrapper[4884]: I1210 02:01:48.227526 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_33298def-a273-4c2a-bc65-32dc50928a1a/nova-api-api/0.log" Dec 10 02:01:48 crc kubenswrapper[4884]: I1210 02:01:48.429556 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_e77e5c1a-be83-4f84-8454-2e4cd443c2dc/nova-cell1-conductor-conductor/0.log" Dec 10 02:01:48 crc kubenswrapper[4884]: I1210 02:01:48.570621 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_453881c1-b2d7-4b85-a686-68711bc65917/nova-cell1-novncproxy-novncproxy/0.log" Dec 10 02:01:48 crc kubenswrapper[4884]: I1210 02:01:48.715545 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_112d4b11-3fd1-4277-b2ec-8b87cae86c10/nova-metadata-log/0.log" Dec 10 02:01:48 crc kubenswrapper[4884]: I1210 02:01:48.937979 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_319b944f-ffe5-4b53-90c3-412bf9c8c818/nova-scheduler-scheduler/0.log" Dec 10 02:01:49 crc kubenswrapper[4884]: I1210 02:01:49.033462 4884 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_d089c5ef-75b6-480e-b726-abd349a291cc/mysql-bootstrap/0.log" Dec 10 02:01:49 crc kubenswrapper[4884]: I1210 02:01:49.181216 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_d089c5ef-75b6-480e-b726-abd349a291cc/mysql-bootstrap/0.log" Dec 10 02:01:49 crc kubenswrapper[4884]: I1210 02:01:49.197340 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_d089c5ef-75b6-480e-b726-abd349a291cc/galera/0.log" Dec 10 02:01:49 crc kubenswrapper[4884]: I1210 02:01:49.388953 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_30c1a0f0-5abf-4fac-89c9-afedea695fab/mysql-bootstrap/0.log" Dec 10 02:01:49 crc kubenswrapper[4884]: I1210 02:01:49.681265 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_30c1a0f0-5abf-4fac-89c9-afedea695fab/mysql-bootstrap/0.log" Dec 10 02:01:49 crc kubenswrapper[4884]: I1210 02:01:49.709702 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_30c1a0f0-5abf-4fac-89c9-afedea695fab/galera/0.log" Dec 10 02:01:49 crc kubenswrapper[4884]: I1210 02:01:49.776922 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:49 crc kubenswrapper[4884]: I1210 02:01:49.776957 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:49 crc kubenswrapper[4884]: I1210 02:01:49.820815 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_b215a749-e579-4854-a2f7-ebaf6c3416a8/openstackclient/0.log" Dec 10 02:01:49 crc kubenswrapper[4884]: I1210 02:01:49.827963 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:49 crc kubenswrapper[4884]: I1210 02:01:49.985031 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-577tv_b040b499-55d1-4173-bcfe-8e0100eed4b0/ovn-controller/0.log" Dec 10 02:01:50 crc kubenswrapper[4884]: I1210 02:01:50.162325 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-wsrpp_655ff7a7-35ba-4042-b170-f9de53553510/openstack-network-exporter/0.log" Dec 10 02:01:50 crc kubenswrapper[4884]: I1210 02:01:50.331032 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-m54sx_fe96e3a4-d720-400e-9956-a5cda8c377d6/ovsdb-server-init/0.log" Dec 10 02:01:50 crc kubenswrapper[4884]: I1210 02:01:50.514489 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-m54sx_fe96e3a4-d720-400e-9956-a5cda8c377d6/ovsdb-server-init/0.log" Dec 10 02:01:50 crc kubenswrapper[4884]: I1210 02:01:50.539281 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-m54sx_fe96e3a4-d720-400e-9956-a5cda8c377d6/ovsdb-server/0.log" Dec 10 02:01:50 crc kubenswrapper[4884]: I1210 02:01:50.553366 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_112d4b11-3fd1-4277-b2ec-8b87cae86c10/nova-metadata-metadata/0.log" Dec 10 02:01:50 crc kubenswrapper[4884]: I1210 02:01:50.565019 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-controller-ovs-m54sx_fe96e3a4-d720-400e-9956-a5cda8c377d6/ovs-vswitchd/0.log" Dec 10 02:01:50 crc kubenswrapper[4884]: I1210 02:01:50.765831 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-h92sf_14b622f8-c484-476b-8024-8c1afeef15c2/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:50 crc kubenswrapper[4884]: I1210 02:01:50.828887 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_785aab89-2566-4c6f-a2b6-58207021cf39/openstack-network-exporter/0.log" Dec 10 02:01:50 crc kubenswrapper[4884]: I1210 02:01:50.937671 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_785aab89-2566-4c6f-a2b6-58207021cf39/ovn-northd/0.log" Dec 10 02:01:51 crc kubenswrapper[4884]: I1210 02:01:51.026552 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_ffe4f087-ebb2-404d-bdc9-fe508c624b82/openstack-network-exporter/0.log" Dec 10 02:01:51 crc kubenswrapper[4884]: I1210 02:01:51.087905 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_ffe4f087-ebb2-404d-bdc9-fe508c624b82/ovsdbserver-nb/0.log" Dec 10 02:01:51 crc kubenswrapper[4884]: I1210 02:01:51.240958 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_7ba7b2e0-e414-4f73-b4b5-ffc5251a2709/openstack-network-exporter/0.log" Dec 10 02:01:51 crc kubenswrapper[4884]: I1210 02:01:51.241386 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_7ba7b2e0-e414-4f73-b4b5-ffc5251a2709/ovsdbserver-sb/0.log" Dec 10 02:01:51 crc kubenswrapper[4884]: I1210 02:01:51.439291 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-c95474448-hcdj6_fb9dbf85-546d-418a-a001-c75e5817e1b7/placement-api/0.log" Dec 10 02:01:51 crc kubenswrapper[4884]: I1210 02:01:51.579018 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-c95474448-hcdj6_fb9dbf85-546d-418a-a001-c75e5817e1b7/placement-log/0.log" Dec 10 02:01:51 crc kubenswrapper[4884]: I1210 02:01:51.581957 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_9879cc81-0cad-4e77-90e5-46afd9adb241/init-config-reloader/0.log" Dec 10 02:01:51 crc kubenswrapper[4884]: I1210 02:01:51.782772 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_9879cc81-0cad-4e77-90e5-46afd9adb241/prometheus/0.log" Dec 10 02:01:51 crc kubenswrapper[4884]: I1210 02:01:51.820743 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_9879cc81-0cad-4e77-90e5-46afd9adb241/init-config-reloader/0.log" Dec 10 02:01:51 crc kubenswrapper[4884]: I1210 02:01:51.821006 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_9879cc81-0cad-4e77-90e5-46afd9adb241/thanos-sidecar/0.log" Dec 10 02:01:51 crc kubenswrapper[4884]: I1210 02:01:51.827247 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_9879cc81-0cad-4e77-90e5-46afd9adb241/config-reloader/0.log" Dec 10 02:01:51 crc kubenswrapper[4884]: I1210 02:01:51.989174 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_1a7f3ac8-d53a-444b-94c8-0ea465ea74b8/setup-container/0.log" Dec 10 02:01:52 crc kubenswrapper[4884]: I1210 02:01:52.214612 4884 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_1a7f3ac8-d53a-444b-94c8-0ea465ea74b8/setup-container/0.log" Dec 10 02:01:52 crc kubenswrapper[4884]: I1210 02:01:52.228211 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_1a7f3ac8-d53a-444b-94c8-0ea465ea74b8/rabbitmq/0.log" Dec 10 02:01:52 crc kubenswrapper[4884]: I1210 02:01:52.308516 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4/setup-container/0.log" Dec 10 02:01:52 crc kubenswrapper[4884]: I1210 02:01:52.485075 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4/setup-container/0.log" Dec 10 02:01:52 crc kubenswrapper[4884]: I1210 02:01:52.577362 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_834c403c-b8cf-4cbc-ae4b-50a4dbe7ccc4/rabbitmq/0.log" Dec 10 02:01:52 crc kubenswrapper[4884]: I1210 02:01:52.585122 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-8jdkl_acc4e8e3-089c-4bb1-935a-f1597f0e973c/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:52 crc kubenswrapper[4884]: I1210 02:01:52.841264 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-9x4vp_357a1ded-5f10-42db-a1d4-ed63c8297d3b/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:52 crc kubenswrapper[4884]: I1210 02:01:52.843122 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-57rln_b3dddf04-7667-4d75-8532-12a3deb1e77c/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.102521 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-glgsr_e3b40db4-283d-45ba-85c0-31e649ccdfb2/ssh-known-hosts-edpm-deployment/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.200112 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-8567bf5cf5-sh595_b9b2136d-c328-4096-b478-ad8836adf843/proxy-server/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.258148 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-8567bf5cf5-sh595_b9b2136d-c328-4096-b478-ad8836adf843/proxy-httpd/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.342081 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-t7f78_e96bb397-11f0-4e24-aafc-9f399d5846b8/swift-ring-rebalance/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.464697 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/account-auditor/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.551653 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/account-reaper/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.598663 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/account-replicator/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.664197 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/account-server/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.674386 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/container-auditor/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.797955 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/container-replicator/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.859010 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/container-updater/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.866013 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/container-server/0.log" Dec 10 02:01:53 crc kubenswrapper[4884]: I1210 02:01:53.919122 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/object-auditor/0.log" Dec 10 02:01:54 crc kubenswrapper[4884]: I1210 02:01:54.063914 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/object-expirer/0.log" Dec 10 02:01:54 crc kubenswrapper[4884]: I1210 02:01:54.072135 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/object-server/0.log" Dec 10 02:01:54 crc kubenswrapper[4884]: I1210 02:01:54.100097 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/object-replicator/0.log" Dec 10 02:01:54 crc kubenswrapper[4884]: I1210 02:01:54.165793 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/object-updater/0.log" Dec 10 02:01:54 crc kubenswrapper[4884]: E1210 02:01:54.290679 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:01:54 crc kubenswrapper[4884]: I1210 02:01:54.297453 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/swift-recon-cron/0.log" Dec 10 02:01:54 crc kubenswrapper[4884]: I1210 02:01:54.304599 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_77e9a322-ea03-4101-b8be-d1e09f67e8c2/rsync/0.log" Dec 10 02:01:54 crc kubenswrapper[4884]: I1210 02:01:54.417093 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-582t9_a14d2c3e-ab42-422b-9e3e-e716a86dfe8b/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:54 crc kubenswrapper[4884]: I1210 02:01:54.516062 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-9n8c2_54624723-55f9-4b1e-8d32-66fc274ff7af/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:54 crc kubenswrapper[4884]: I1210 02:01:54.688852 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-gwzdq_8546aaa3-e2d9-4646-a9d5-fe00b5a53e2d/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:54 crc kubenswrapper[4884]: I1210 02:01:54.803671 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-hkxcz_e2755ba3-53ef-4f12-91cf-1c90cde36fab/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:54 crc kubenswrapper[4884]: I1210 02:01:54.902176 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-ngl5p_43fa1b12-f3b7-4bb1-a632-f3f756e29d48/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:55 crc kubenswrapper[4884]: I1210 02:01:55.035524 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-ntfmk_2149c412-a8d9-4bfe-b1b7-e901dcc5d132/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:55 crc kubenswrapper[4884]: I1210 02:01:55.157114 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-wgrpn_71585968-47e5-4291-8945-3af278090bd7/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:55 crc kubenswrapper[4884]: I1210 02:01:55.491021 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-qj9xm_2feb23bd-a4c7-4c90-9793-d2abb8c7c02e/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Dec 10 02:01:56 crc kubenswrapper[4884]: E1210 02:01:56.288815 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:01:59 crc kubenswrapper[4884]: I1210 02:01:59.837219 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:01:59 crc kubenswrapper[4884]: I1210 02:01:59.890100 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jg5gn"] Dec 10 02:02:00 crc kubenswrapper[4884]: I1210 02:02:00.009695 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_fb3db35c-fdef-4e0a-81a3-7c13f3a20649/memcached/0.log" Dec 10 02:02:00 crc kubenswrapper[4884]: I1210 02:02:00.699655 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jg5gn" podUID="a301df2a-d84a-4216-b904-8691cab27c3e" containerName="registry-server" containerID="cri-o://f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80" gracePeriod=2 Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.261126 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.306593 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a301df2a-d84a-4216-b904-8691cab27c3e-catalog-content\") pod \"a301df2a-d84a-4216-b904-8691cab27c3e\" (UID: \"a301df2a-d84a-4216-b904-8691cab27c3e\") " Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.306669 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a301df2a-d84a-4216-b904-8691cab27c3e-utilities\") pod \"a301df2a-d84a-4216-b904-8691cab27c3e\" (UID: \"a301df2a-d84a-4216-b904-8691cab27c3e\") " Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.306853 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkqjs\" (UniqueName: \"kubernetes.io/projected/a301df2a-d84a-4216-b904-8691cab27c3e-kube-api-access-zkqjs\") pod \"a301df2a-d84a-4216-b904-8691cab27c3e\" (UID: \"a301df2a-d84a-4216-b904-8691cab27c3e\") " Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.308259 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a301df2a-d84a-4216-b904-8691cab27c3e-utilities" (OuterVolumeSpecName: "utilities") pod "a301df2a-d84a-4216-b904-8691cab27c3e" (UID: "a301df2a-d84a-4216-b904-8691cab27c3e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.337670 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a301df2a-d84a-4216-b904-8691cab27c3e-kube-api-access-zkqjs" (OuterVolumeSpecName: "kube-api-access-zkqjs") pod "a301df2a-d84a-4216-b904-8691cab27c3e" (UID: "a301df2a-d84a-4216-b904-8691cab27c3e"). InnerVolumeSpecName "kube-api-access-zkqjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.366491 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a301df2a-d84a-4216-b904-8691cab27c3e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a301df2a-d84a-4216-b904-8691cab27c3e" (UID: "a301df2a-d84a-4216-b904-8691cab27c3e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.413913 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a301df2a-d84a-4216-b904-8691cab27c3e-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.413954 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkqjs\" (UniqueName: \"kubernetes.io/projected/a301df2a-d84a-4216-b904-8691cab27c3e-kube-api-access-zkqjs\") on node \"crc\" DevicePath \"\"" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.413966 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a301df2a-d84a-4216-b904-8691cab27c3e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.709046 4884 generic.go:334] "Generic (PLEG): container finished" podID="a301df2a-d84a-4216-b904-8691cab27c3e" containerID="f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80" exitCode=0 Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.709083 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jg5gn" event={"ID":"a301df2a-d84a-4216-b904-8691cab27c3e","Type":"ContainerDied","Data":"f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80"} Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.709107 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jg5gn" event={"ID":"a301df2a-d84a-4216-b904-8691cab27c3e","Type":"ContainerDied","Data":"20a26f4f705d24d5bed2712bbf3df8dcf86510cf30ede92932990ec7fbcc77ad"} Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.709123 4884 scope.go:117] "RemoveContainer" containerID="f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.709236 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jg5gn" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.734856 4884 scope.go:117] "RemoveContainer" containerID="96a637ef91040b78451e4baac323f5c368953516de9985503cfd5c64ce040c6d" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.743779 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jg5gn"] Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.756073 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jg5gn"] Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.765543 4884 scope.go:117] "RemoveContainer" containerID="9dce431a91d1025b4e87d14b08074e3f16aba2f70620fac2d853c28da2e11511" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.822331 4884 scope.go:117] "RemoveContainer" containerID="f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80" Dec 10 02:02:01 crc kubenswrapper[4884]: E1210 02:02:01.823494 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80\": container with ID starting with f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80 not found: ID does not exist" containerID="f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.823612 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80"} err="failed to get container status \"f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80\": rpc error: code = NotFound desc = could not find container \"f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80\": container with ID starting with f295617b20020f9124ebe8fb6e068f913162b6794095de5155ad512e65b2ba80 not found: ID does not exist" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.823694 4884 scope.go:117] "RemoveContainer" containerID="96a637ef91040b78451e4baac323f5c368953516de9985503cfd5c64ce040c6d" Dec 10 02:02:01 crc kubenswrapper[4884]: E1210 02:02:01.824054 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96a637ef91040b78451e4baac323f5c368953516de9985503cfd5c64ce040c6d\": container with ID starting with 96a637ef91040b78451e4baac323f5c368953516de9985503cfd5c64ce040c6d not found: ID does not exist" containerID="96a637ef91040b78451e4baac323f5c368953516de9985503cfd5c64ce040c6d" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.824143 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96a637ef91040b78451e4baac323f5c368953516de9985503cfd5c64ce040c6d"} err="failed to get container status \"96a637ef91040b78451e4baac323f5c368953516de9985503cfd5c64ce040c6d\": rpc error: code = NotFound desc = could not find container \"96a637ef91040b78451e4baac323f5c368953516de9985503cfd5c64ce040c6d\": container with ID starting with 96a637ef91040b78451e4baac323f5c368953516de9985503cfd5c64ce040c6d not found: ID does not exist" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.824217 4884 scope.go:117] "RemoveContainer" containerID="9dce431a91d1025b4e87d14b08074e3f16aba2f70620fac2d853c28da2e11511" Dec 10 02:02:01 crc kubenswrapper[4884]: E1210 02:02:01.824551 4884 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"9dce431a91d1025b4e87d14b08074e3f16aba2f70620fac2d853c28da2e11511\": container with ID starting with 9dce431a91d1025b4e87d14b08074e3f16aba2f70620fac2d853c28da2e11511 not found: ID does not exist" containerID="9dce431a91d1025b4e87d14b08074e3f16aba2f70620fac2d853c28da2e11511" Dec 10 02:02:01 crc kubenswrapper[4884]: I1210 02:02:01.824636 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9dce431a91d1025b4e87d14b08074e3f16aba2f70620fac2d853c28da2e11511"} err="failed to get container status \"9dce431a91d1025b4e87d14b08074e3f16aba2f70620fac2d853c28da2e11511\": rpc error: code = NotFound desc = could not find container \"9dce431a91d1025b4e87d14b08074e3f16aba2f70620fac2d853c28da2e11511\": container with ID starting with 9dce431a91d1025b4e87d14b08074e3f16aba2f70620fac2d853c28da2e11511 not found: ID does not exist" Dec 10 02:02:03 crc kubenswrapper[4884]: I1210 02:02:03.300848 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a301df2a-d84a-4216-b904-8691cab27c3e" path="/var/lib/kubelet/pods/a301df2a-d84a-4216-b904-8691cab27c3e/volumes" Dec 10 02:02:05 crc kubenswrapper[4884]: E1210 02:02:05.289919 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:02:09 crc kubenswrapper[4884]: E1210 02:02:09.292046 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:02:18 crc kubenswrapper[4884]: I1210 02:02:18.098220 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 02:02:18 crc kubenswrapper[4884]: I1210 02:02:18.098871 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 02:02:18 crc kubenswrapper[4884]: I1210 02:02:18.098921 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 02:02:18 crc kubenswrapper[4884]: I1210 02:02:18.099973 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e8f4eac03ccca0f0d31a1e5527a9ae76c203b8538de41b60f9d5802efc9a7452"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 02:02:18 crc kubenswrapper[4884]: I1210 02:02:18.100138 4884 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://e8f4eac03ccca0f0d31a1e5527a9ae76c203b8538de41b60f9d5802efc9a7452" gracePeriod=600 Dec 10 02:02:18 crc kubenswrapper[4884]: I1210 02:02:18.907553 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"e8f4eac03ccca0f0d31a1e5527a9ae76c203b8538de41b60f9d5802efc9a7452"} Dec 10 02:02:18 crc kubenswrapper[4884]: I1210 02:02:18.908227 4884 scope.go:117] "RemoveContainer" containerID="a1f53d1ed9b0317448f9ee4cf2ace780328bacf380f5f109768006ec99f248ea" Dec 10 02:02:18 crc kubenswrapper[4884]: I1210 02:02:18.907594 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="e8f4eac03ccca0f0d31a1e5527a9ae76c203b8538de41b60f9d5802efc9a7452" exitCode=0 Dec 10 02:02:19 crc kubenswrapper[4884]: E1210 02:02:19.289000 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:02:19 crc kubenswrapper[4884]: I1210 02:02:19.929253 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804"} Dec 10 02:02:22 crc kubenswrapper[4884]: I1210 02:02:22.609228 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5_c5236b27-f7c5-4be5-9cab-a964b2206b70/util/0.log" Dec 10 02:02:22 crc kubenswrapper[4884]: I1210 02:02:22.825753 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5_c5236b27-f7c5-4be5-9cab-a964b2206b70/pull/0.log" Dec 10 02:02:22 crc kubenswrapper[4884]: I1210 02:02:22.827815 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5_c5236b27-f7c5-4be5-9cab-a964b2206b70/pull/0.log" Dec 10 02:02:22 crc kubenswrapper[4884]: I1210 02:02:22.843494 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5_c5236b27-f7c5-4be5-9cab-a964b2206b70/util/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.097679 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5_c5236b27-f7c5-4be5-9cab-a964b2206b70/pull/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.104936 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5_c5236b27-f7c5-4be5-9cab-a964b2206b70/extract/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.138121 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_89754aa608e9bb4c4e06d3e622fcb143c462f01c473469e9511e35e0fb2fhr5_c5236b27-f7c5-4be5-9cab-a964b2206b70/util/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.262459 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-w6tvz_c88f7e0d-d880-42ce-96fd-a1d1ec7be33f/kube-rbac-proxy/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: E1210 02:02:24.291043 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.343151 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-w6tvz_c88f7e0d-d880-42ce-96fd-a1d1ec7be33f/manager/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.391211 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-twll8_c355d3e8-66ee-46b5-8979-d94efb631d6a/kube-rbac-proxy/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.467248 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6c677c69b-twll8_c355d3e8-66ee-46b5-8979-d94efb631d6a/manager/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.535115 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-gdkh6_6953ae1f-46db-410e-b79a-7eff9b687850/kube-rbac-proxy/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.572284 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-697fb699cf-gdkh6_6953ae1f-46db-410e-b79a-7eff9b687850/manager/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.709665 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-84wnx_c74b9b64-5f7c-462a-85e9-a7eacaf2824e/kube-rbac-proxy/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.768524 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-5697bb5779-84wnx_c74b9b64-5f7c-462a-85e9-a7eacaf2824e/manager/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.863583 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-w9xfr_203fedfb-ba93-4cb0-afd5-a01607b4f40d/kube-rbac-proxy/0.log" Dec 10 02:02:24 crc kubenswrapper[4884]: I1210 02:02:24.997917 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-wwrh9_4e506cfc-ad57-47e4-91ff-c4779cec4258/kube-rbac-proxy/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.029906 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-w9xfr_203fedfb-ba93-4cb0-afd5-a01607b4f40d/manager/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.070991 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-wwrh9_4e506cfc-ad57-47e4-91ff-c4779cec4258/manager/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.219927 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-gxgfp_4206f75a-b9be-4d84-806b-0dea3aab1823/kube-rbac-proxy/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.299346 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-5zl6k_53fab550-e4ee-4601-bc21-e93b60ee3788/kube-rbac-proxy/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.438124 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-967d97867-5zl6k_53fab550-e4ee-4601-bc21-e93b60ee3788/manager/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.510295 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-78d48bff9d-gxgfp_4206f75a-b9be-4d84-806b-0dea3aab1823/manager/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.535516 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-t95cb_e9ad1d55-9cdf-42bd-89c4-47a4ad0150db/kube-rbac-proxy/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.616781 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-t95cb_e9ad1d55-9cdf-42bd-89c4-47a4ad0150db/manager/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.683761 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-j554b_4309c558-54b8-4034-85db-c4ca159600ad/kube-rbac-proxy/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.727945 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5b5fd79c9c-j554b_4309c558-54b8-4034-85db-c4ca159600ad/manager/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.817580 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-gb69x_9fd7b8b4-5bdd-4d3c-859e-78ba9cced0b6/kube-rbac-proxy/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.872037 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-79c8c4686c-gb69x_9fd7b8b4-5bdd-4d3c-859e-78ba9cced0b6/manager/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.895112 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-4f28g_07197f67-1348-456f-a655-d3a418542e85/kube-rbac-proxy/0.log" Dec 10 02:02:25 crc kubenswrapper[4884]: I1210 02:02:25.978629 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-4f28g_07197f67-1348-456f-a655-d3a418542e85/manager/0.log" Dec 10 02:02:26 crc kubenswrapper[4884]: I1210 02:02:26.095652 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-77grd_c818d687-5ceb-4124-9ca3-be82851aa092/kube-rbac-proxy/0.log" Dec 10 02:02:26 crc kubenswrapper[4884]: I1210 02:02:26.128592 4884 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-77grd_c818d687-5ceb-4124-9ca3-be82851aa092/manager/0.log" Dec 10 02:02:26 crc kubenswrapper[4884]: I1210 02:02:26.193952 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-s5dkz_9295a029-0033-437d-a315-0549c7dc31aa/kube-rbac-proxy/0.log" Dec 10 02:02:26 crc kubenswrapper[4884]: I1210 02:02:26.230263 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-s5dkz_9295a029-0033-437d-a315-0549c7dc31aa/manager/0.log" Dec 10 02:02:26 crc kubenswrapper[4884]: I1210 02:02:26.316316 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fxkb8j_3607d45b-2333-4034-aa0b-830759f88204/kube-rbac-proxy/0.log" Dec 10 02:02:26 crc kubenswrapper[4884]: I1210 02:02:26.328489 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-84b575879fxkb8j_3607d45b-2333-4034-aa0b-830759f88204/manager/0.log" Dec 10 02:02:26 crc kubenswrapper[4884]: I1210 02:02:26.644836 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-bpdr4_394f665f-f71e-4835-be68-15a3172a712e/registry-server/0.log" Dec 10 02:02:26 crc kubenswrapper[4884]: I1210 02:02:26.658071 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-7cccdd5794-k969f_c1bdca51-e4b7-449f-a087-1247a1649701/operator/0.log" Dec 10 02:02:26 crc kubenswrapper[4884]: I1210 02:02:26.744806 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-x65hg_69dc456a-37e7-45e5-8bc0-943cae050bd7/kube-rbac-proxy/0.log" Dec 10 02:02:26 crc kubenswrapper[4884]: I1210 02:02:26.925755 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-x65hg_69dc456a-37e7-45e5-8bc0-943cae050bd7/manager/0.log" Dec 10 02:02:26 crc kubenswrapper[4884]: I1210 02:02:26.934783 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-64fqx_d5ce6700-aa2a-4a81-a4db-214dc5cd0305/kube-rbac-proxy/0.log" Dec 10 02:02:26 crc kubenswrapper[4884]: I1210 02:02:26.971824 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-64fqx_d5ce6700-aa2a-4a81-a4db-214dc5cd0305/manager/0.log" Dec 10 02:02:27 crc kubenswrapper[4884]: I1210 02:02:27.162795 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-h2m6r_0fdff275-feeb-4244-8032-75d06452776a/operator/0.log" Dec 10 02:02:27 crc kubenswrapper[4884]: I1210 02:02:27.216551 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-k7vwz_04af4b93-00cd-4e1a-ad1e-84b438ff9b5f/kube-rbac-proxy/0.log" Dec 10 02:02:27 crc kubenswrapper[4884]: I1210 02:02:27.382454 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6fcddf47c-zdm75_6302d211-8a02-4734-9206-7ff93939d971/kube-rbac-proxy/0.log" Dec 10 02:02:27 crc kubenswrapper[4884]: I1210 02:02:27.383745 4884 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-9d58d64bc-k7vwz_04af4b93-00cd-4e1a-ad1e-84b438ff9b5f/manager/0.log" Dec 10 02:02:27 crc kubenswrapper[4884]: I1210 02:02:27.593658 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-vrrck_25ce6a20-18da-43c1-b3e4-70b2ca9185e0/kube-rbac-proxy/0.log" Dec 10 02:02:27 crc kubenswrapper[4884]: I1210 02:02:27.715165 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-vrrck_25ce6a20-18da-43c1-b3e4-70b2ca9185e0/manager/0.log" Dec 10 02:02:27 crc kubenswrapper[4884]: I1210 02:02:27.797140 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-667bd8d554-cwql6_99111a7e-4213-48ad-aa8e-71205314a433/kube-rbac-proxy/0.log" Dec 10 02:02:27 crc kubenswrapper[4884]: I1210 02:02:27.834227 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6fcddf47c-zdm75_6302d211-8a02-4734-9206-7ff93939d971/manager/0.log" Dec 10 02:02:27 crc kubenswrapper[4884]: I1210 02:02:27.887364 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6d5bb94f9c-zvfmc_effda10d-9f01-4d20-8f0b-23ff781864d2/manager/0.log" Dec 10 02:02:27 crc kubenswrapper[4884]: I1210 02:02:27.956933 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-667bd8d554-cwql6_99111a7e-4213-48ad-aa8e-71205314a433/manager/0.log" Dec 10 02:02:33 crc kubenswrapper[4884]: E1210 02:02:33.288658 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:02:35 crc kubenswrapper[4884]: E1210 02:02:35.291185 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:02:44 crc kubenswrapper[4884]: E1210 02:02:44.290553 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:02:48 crc kubenswrapper[4884]: I1210 02:02:48.237855 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-vcrdk_7bdeeac5-9323-4934-bf1a-10bb6c8c6f86/control-plane-machine-set-operator/0.log" Dec 10 02:02:48 crc kubenswrapper[4884]: I1210 02:02:48.385112 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-v4mzd_fd8e23b3-5b64-47bc-903f-4feb12a34389/kube-rbac-proxy/0.log" Dec 10 02:02:48 crc kubenswrapper[4884]: I1210 02:02:48.458608 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-v4mzd_fd8e23b3-5b64-47bc-903f-4feb12a34389/machine-api-operator/0.log" Dec 10 02:02:49 crc kubenswrapper[4884]: E1210 02:02:49.289850 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:02:56 crc kubenswrapper[4884]: E1210 02:02:56.291226 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:03:01 crc kubenswrapper[4884]: E1210 02:03:01.291644 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:03:03 crc kubenswrapper[4884]: I1210 02:03:03.566076 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-h8t2q_f4fb928c-c33e-4b15-a9ac-a8936ddc4439/cert-manager-controller/0.log" Dec 10 02:03:03 crc kubenswrapper[4884]: I1210 02:03:03.668531 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-mxs96_c9b24f3f-f93f-4514-8162-681c6b3bc0ad/cert-manager-cainjector/0.log" Dec 10 02:03:03 crc kubenswrapper[4884]: I1210 02:03:03.715961 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-v52kp_6c33c936-cf88-4bce-91f1-bc463f39a9ef/cert-manager-webhook/0.log" Dec 10 02:03:10 crc kubenswrapper[4884]: E1210 02:03:10.289850 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:03:13 crc kubenswrapper[4884]: E1210 02:03:13.290316 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:03:17 crc kubenswrapper[4884]: I1210 02:03:17.297746 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-kmxrk_7555d5e8-0cff-4ec4-9b26-dfa45466de89/nmstate-console-plugin/0.log" Dec 10 02:03:17 crc kubenswrapper[4884]: I1210 02:03:17.477661 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-pqmdn_d0acd1d8-d03c-467e-8471-481db90de737/kube-rbac-proxy/0.log" Dec 10 02:03:17 crc kubenswrapper[4884]: I1210 02:03:17.490472 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-handler-pdjzx_9ede9448-a62c-49f3-8b99-69d971a74e83/nmstate-handler/0.log" Dec 10 02:03:17 crc kubenswrapper[4884]: I1210 02:03:17.563192 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-pqmdn_d0acd1d8-d03c-467e-8471-481db90de737/nmstate-metrics/0.log" Dec 10 02:03:17 crc kubenswrapper[4884]: I1210 02:03:17.675208 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-2mw5c_fe41be34-f133-4e4f-81e9-2d58cde00923/nmstate-operator/0.log" Dec 10 02:03:17 crc kubenswrapper[4884]: I1210 02:03:17.779564 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-d4wwf_36ef55f9-d3ad-47fb-99d8-0cfe51482d57/nmstate-webhook/0.log" Dec 10 02:03:25 crc kubenswrapper[4884]: E1210 02:03:25.296040 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:03:28 crc kubenswrapper[4884]: E1210 02:03:28.291020 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:03:33 crc kubenswrapper[4884]: I1210 02:03:33.372659 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-887bbc66c-sjvbc_0495f56c-e0ff-4bb7-861c-21754379af3f/kube-rbac-proxy/0.log" Dec 10 02:03:33 crc kubenswrapper[4884]: I1210 02:03:33.419779 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-887bbc66c-sjvbc_0495f56c-e0ff-4bb7-861c-21754379af3f/manager/0.log" Dec 10 02:03:37 crc kubenswrapper[4884]: E1210 02:03:37.312040 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:03:40 crc kubenswrapper[4884]: E1210 02:03:40.289563 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:03:49 crc kubenswrapper[4884]: I1210 02:03:49.849817 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_cluster-logging-operator-ff9846bd-w84c2_3baeade4-d934-423b-be72-b94c23e737ba/cluster-logging-operator/0.log" Dec 10 02:03:50 crc kubenswrapper[4884]: I1210 02:03:50.040421 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_collector-drtt8_bf56411e-549f-4625-87fb-59932e4e58ed/collector/0.log" Dec 10 02:03:50 crc kubenswrapper[4884]: I1210 02:03:50.132746 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-logging_logging-loki-compactor-0_e5d84713-f630-43e2-9666-86b465112548/loki-compactor/0.log" Dec 10 02:03:50 crc kubenswrapper[4884]: I1210 02:03:50.282107 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-distributor-76cc67bf56-rhlrk_b58ab17f-8141-4a6c-ba11-5409fae236c6/loki-distributor/0.log" Dec 10 02:03:50 crc kubenswrapper[4884]: I1210 02:03:50.390875 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-98d4874d8-j2jj4_165d85c1-40f2-4a21-af8c-ae510a8fe6a1/gateway/0.log" Dec 10 02:03:50 crc kubenswrapper[4884]: I1210 02:03:50.488242 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-98d4874d8-j2jj4_165d85c1-40f2-4a21-af8c-ae510a8fe6a1/opa/0.log" Dec 10 02:03:50 crc kubenswrapper[4884]: I1210 02:03:50.686634 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-98d4874d8-qzbtl_a971900d-5e53-42f1-ac1a-3ec4a99b3d32/gateway/0.log" Dec 10 02:03:50 crc kubenswrapper[4884]: I1210 02:03:50.729219 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-gateway-98d4874d8-qzbtl_a971900d-5e53-42f1-ac1a-3ec4a99b3d32/opa/0.log" Dec 10 02:03:50 crc kubenswrapper[4884]: I1210 02:03:50.882364 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-index-gateway-0_795df1c2-a13d-4196-9246-0ef72ca9d141/loki-index-gateway/0.log" Dec 10 02:03:50 crc kubenswrapper[4884]: I1210 02:03:50.984177 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-ingester-0_68b75b37-1530-4b53-95c9-2c4073c1120b/loki-ingester/0.log" Dec 10 02:03:51 crc kubenswrapper[4884]: I1210 02:03:51.095921 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-querier-5895d59bb8-2qrrl_c21130da-105e-48b9-a89b-a189cb685b5d/loki-querier/0.log" Dec 10 02:03:51 crc kubenswrapper[4884]: I1210 02:03:51.185047 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-logging_logging-loki-query-frontend-84558f7c9f-ggmrc_b4520f02-2919-4b32-96f8-8e23a7c13e6c/loki-query-frontend/0.log" Dec 10 02:03:52 crc kubenswrapper[4884]: E1210 02:03:52.289753 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:03:54 crc kubenswrapper[4884]: E1210 02:03:54.289130 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:04:04 crc kubenswrapper[4884]: E1210 02:04:04.289291 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:04:05 crc kubenswrapper[4884]: E1210 02:04:05.288924 4884 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:04:07 crc kubenswrapper[4884]: I1210 02:04:07.657241 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-wt5kv_cefdd4d8-632f-4912-b381-8d5df050aa30/kube-rbac-proxy/0.log" Dec 10 02:04:07 crc kubenswrapper[4884]: I1210 02:04:07.872997 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-wt5kv_cefdd4d8-632f-4912-b381-8d5df050aa30/controller/0.log" Dec 10 02:04:07 crc kubenswrapper[4884]: I1210 02:04:07.941035 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/cp-frr-files/0.log" Dec 10 02:04:08 crc kubenswrapper[4884]: I1210 02:04:08.263036 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/cp-frr-files/0.log" Dec 10 02:04:08 crc kubenswrapper[4884]: I1210 02:04:08.286540 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/cp-reloader/0.log" Dec 10 02:04:08 crc kubenswrapper[4884]: I1210 02:04:08.311122 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/cp-reloader/0.log" Dec 10 02:04:08 crc kubenswrapper[4884]: I1210 02:04:08.356862 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/cp-metrics/0.log" Dec 10 02:04:08 crc kubenswrapper[4884]: I1210 02:04:08.446333 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/cp-frr-files/0.log" Dec 10 02:04:08 crc kubenswrapper[4884]: I1210 02:04:08.503624 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/cp-metrics/0.log" Dec 10 02:04:08 crc kubenswrapper[4884]: I1210 02:04:08.507670 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/cp-reloader/0.log" Dec 10 02:04:08 crc kubenswrapper[4884]: I1210 02:04:08.545713 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/cp-metrics/0.log" Dec 10 02:04:08 crc kubenswrapper[4884]: I1210 02:04:08.718266 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/cp-frr-files/0.log" Dec 10 02:04:08 crc kubenswrapper[4884]: I1210 02:04:08.770942 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/cp-reloader/0.log" Dec 10 02:04:08 crc kubenswrapper[4884]: I1210 02:04:08.771076 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/cp-metrics/0.log" Dec 10 02:04:08 crc kubenswrapper[4884]: I1210 02:04:08.810675 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/controller/0.log" Dec 10 02:04:08 crc 
kubenswrapper[4884]: I1210 02:04:08.960143 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/frr-metrics/0.log" Dec 10 02:04:09 crc kubenswrapper[4884]: I1210 02:04:09.042798 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/kube-rbac-proxy/0.log" Dec 10 02:04:09 crc kubenswrapper[4884]: I1210 02:04:09.072737 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/kube-rbac-proxy-frr/0.log" Dec 10 02:04:09 crc kubenswrapper[4884]: I1210 02:04:09.215949 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/reloader/0.log" Dec 10 02:04:09 crc kubenswrapper[4884]: I1210 02:04:09.324105 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-tvnsf_e737cf7c-81d6-4aa0-b9c3-c3f2de596724/frr-k8s-webhook-server/0.log" Dec 10 02:04:09 crc kubenswrapper[4884]: I1210 02:04:09.596465 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-866896889b-hszbz_23b67b7c-b959-4f92-96c1-17daa41985c9/manager/0.log" Dec 10 02:04:09 crc kubenswrapper[4884]: I1210 02:04:09.737236 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-556949cfd9-7msjt_4377a5f5-ae57-4c9a-9e25-a69371de7097/webhook-server/0.log" Dec 10 02:04:09 crc kubenswrapper[4884]: I1210 02:04:09.849463 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-2bbfm_7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660/kube-rbac-proxy/0.log" Dec 10 02:04:10 crc kubenswrapper[4884]: I1210 02:04:10.419687 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-2bbfm_7e941ef6-a1e7-47b7-b7cb-eaaaaa2a8660/speaker/0.log" Dec 10 02:04:10 crc kubenswrapper[4884]: I1210 02:04:10.553014 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-5gsrt_8da05654-9ed1-48fc-8d0f-b507abeda5f0/frr/0.log" Dec 10 02:04:16 crc kubenswrapper[4884]: E1210 02:04:16.291607 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:04:17 crc kubenswrapper[4884]: E1210 02:04:17.298736 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:04:26 crc kubenswrapper[4884]: I1210 02:04:26.203126 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7_c514393e-bc0a-4da5-9b45-de376e94eef8/util/0.log" Dec 10 02:04:26 crc kubenswrapper[4884]: I1210 02:04:26.409826 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7_c514393e-bc0a-4da5-9b45-de376e94eef8/util/0.log" Dec 10 02:04:26 
crc kubenswrapper[4884]: I1210 02:04:26.480502 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7_c514393e-bc0a-4da5-9b45-de376e94eef8/pull/0.log" Dec 10 02:04:26 crc kubenswrapper[4884]: I1210 02:04:26.493402 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7_c514393e-bc0a-4da5-9b45-de376e94eef8/pull/0.log" Dec 10 02:04:27 crc kubenswrapper[4884]: I1210 02:04:27.413064 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7_c514393e-bc0a-4da5-9b45-de376e94eef8/util/0.log" Dec 10 02:04:27 crc kubenswrapper[4884]: I1210 02:04:27.425014 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7_c514393e-bc0a-4da5-9b45-de376e94eef8/pull/0.log" Dec 10 02:04:27 crc kubenswrapper[4884]: I1210 02:04:27.546644 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_4529ed37fc81381df2b45ea09e6f1b4af8d1558d603912431befd8aeb8kgcq7_c514393e-bc0a-4da5-9b45-de376e94eef8/extract/0.log" Dec 10 02:04:27 crc kubenswrapper[4884]: I1210 02:04:27.555200 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk_54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8/util/0.log" Dec 10 02:04:27 crc kubenswrapper[4884]: I1210 02:04:27.858826 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk_54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8/pull/0.log" Dec 10 02:04:27 crc kubenswrapper[4884]: I1210 02:04:27.869957 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk_54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8/util/0.log" Dec 10 02:04:27 crc kubenswrapper[4884]: I1210 02:04:27.887604 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk_54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8/pull/0.log" Dec 10 02:04:28 crc kubenswrapper[4884]: I1210 02:04:28.123123 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk_54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8/util/0.log" Dec 10 02:04:28 crc kubenswrapper[4884]: I1210 02:04:28.137620 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk_54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8/pull/0.log" Dec 10 02:04:28 crc kubenswrapper[4884]: I1210 02:04:28.157729 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212fs4mhk_54ba7596-fc7d-4bfc-ac1f-5e596a14fbb8/extract/0.log" Dec 10 02:04:28 crc kubenswrapper[4884]: I1210 02:04:28.305955 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld_7d1f1281-4b1e-4b2f-988d-c0a3a4045592/util/0.log" Dec 10 02:04:28 crc kubenswrapper[4884]: I1210 02:04:28.531601 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld_7d1f1281-4b1e-4b2f-988d-c0a3a4045592/util/0.log" Dec 10 02:04:28 crc kubenswrapper[4884]: I1210 02:04:28.538120 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld_7d1f1281-4b1e-4b2f-988d-c0a3a4045592/pull/0.log" Dec 10 02:04:28 crc kubenswrapper[4884]: I1210 02:04:28.575172 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld_7d1f1281-4b1e-4b2f-988d-c0a3a4045592/pull/0.log" Dec 10 02:04:28 crc kubenswrapper[4884]: I1210 02:04:28.752634 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld_7d1f1281-4b1e-4b2f-988d-c0a3a4045592/extract/0.log" Dec 10 02:04:28 crc kubenswrapper[4884]: I1210 02:04:28.774458 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld_7d1f1281-4b1e-4b2f-988d-c0a3a4045592/util/0.log" Dec 10 02:04:28 crc kubenswrapper[4884]: I1210 02:04:28.784872 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210zqlld_7d1f1281-4b1e-4b2f-988d-c0a3a4045592/pull/0.log" Dec 10 02:04:28 crc kubenswrapper[4884]: I1210 02:04:28.943646 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l_37f7e3ef-f9a6-4975-ae21-bee745d3063d/util/0.log" Dec 10 02:04:29 crc kubenswrapper[4884]: I1210 02:04:29.089976 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l_37f7e3ef-f9a6-4975-ae21-bee745d3063d/pull/0.log" Dec 10 02:04:29 crc kubenswrapper[4884]: I1210 02:04:29.152351 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l_37f7e3ef-f9a6-4975-ae21-bee745d3063d/util/0.log" Dec 10 02:04:29 crc kubenswrapper[4884]: I1210 02:04:29.157344 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l_37f7e3ef-f9a6-4975-ae21-bee745d3063d/pull/0.log" Dec 10 02:04:29 crc kubenswrapper[4884]: I1210 02:04:29.343077 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l_37f7e3ef-f9a6-4975-ae21-bee745d3063d/util/0.log" Dec 10 02:04:29 crc kubenswrapper[4884]: I1210 02:04:29.378306 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l_37f7e3ef-f9a6-4975-ae21-bee745d3063d/extract/0.log" Dec 10 02:04:29 crc kubenswrapper[4884]: I1210 02:04:29.378871 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_a8a03f72555e3294619fd3c0a789fa82d1f6921a8cf9935ed9b211463frxt5l_37f7e3ef-f9a6-4975-ae21-bee745d3063d/pull/0.log" Dec 10 02:04:29 crc kubenswrapper[4884]: I1210 02:04:29.477212 4884 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx_8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba/util/0.log" Dec 10 02:04:29 crc kubenswrapper[4884]: I1210 02:04:29.649677 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx_8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba/pull/0.log" Dec 10 02:04:29 crc kubenswrapper[4884]: I1210 02:04:29.746379 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx_8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba/util/0.log" Dec 10 02:04:29 crc kubenswrapper[4884]: I1210 02:04:29.764261 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx_8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba/pull/0.log" Dec 10 02:04:29 crc kubenswrapper[4884]: I1210 02:04:29.923764 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx_8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba/util/0.log" Dec 10 02:04:29 crc kubenswrapper[4884]: I1210 02:04:29.928300 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx_8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba/pull/0.log" Dec 10 02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.018916 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83sd6sx_8be574b7-a243-46dd-bbd6-5b9bf9fcb1ba/extract/0.log" Dec 10 02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.026509 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7vf7m_218fb300-2f8e-4ba7-a947-a45ed8426678/extract-utilities/0.log" Dec 10 02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.150877 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7vf7m_218fb300-2f8e-4ba7-a947-a45ed8426678/extract-content/0.log" Dec 10 02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.151022 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7vf7m_218fb300-2f8e-4ba7-a947-a45ed8426678/extract-content/0.log" Dec 10 02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.181988 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7vf7m_218fb300-2f8e-4ba7-a947-a45ed8426678/extract-utilities/0.log" Dec 10 02:04:30 crc kubenswrapper[4884]: E1210 02:04:30.288698 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.357454 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7vf7m_218fb300-2f8e-4ba7-a947-a45ed8426678/extract-utilities/0.log" Dec 10 02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.392230 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7vf7m_218fb300-2f8e-4ba7-a947-a45ed8426678/extract-content/0.log" Dec 10 
02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.472564 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-24s97_fbd36c7b-a8d4-4812-a133-4a82a466dbc7/extract-utilities/0.log" Dec 10 02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.700462 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-24s97_fbd36c7b-a8d4-4812-a133-4a82a466dbc7/extract-content/0.log" Dec 10 02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.732051 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-24s97_fbd36c7b-a8d4-4812-a133-4a82a466dbc7/extract-utilities/0.log" Dec 10 02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.785761 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-24s97_fbd36c7b-a8d4-4812-a133-4a82a466dbc7/extract-content/0.log" Dec 10 02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.871545 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-24s97_fbd36c7b-a8d4-4812-a133-4a82a466dbc7/extract-utilities/0.log" Dec 10 02:04:30 crc kubenswrapper[4884]: I1210 02:04:30.898452 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-24s97_fbd36c7b-a8d4-4812-a133-4a82a466dbc7/extract-content/0.log" Dec 10 02:04:31 crc kubenswrapper[4884]: I1210 02:04:31.055412 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-dccsc_aacf65ac-3787-462f-ad27-fa5663a86d99/marketplace-operator/0.log" Dec 10 02:04:31 crc kubenswrapper[4884]: I1210 02:04:31.155004 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c95cd_d888450e-2455-4c74-9931-699668b137a8/extract-utilities/0.log" Dec 10 02:04:31 crc kubenswrapper[4884]: E1210 02:04:31.289263 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:04:31 crc kubenswrapper[4884]: I1210 02:04:31.331463 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c95cd_d888450e-2455-4c74-9931-699668b137a8/extract-utilities/0.log" Dec 10 02:04:31 crc kubenswrapper[4884]: I1210 02:04:31.350274 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c95cd_d888450e-2455-4c74-9931-699668b137a8/extract-content/0.log" Dec 10 02:04:31 crc kubenswrapper[4884]: I1210 02:04:31.351601 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c95cd_d888450e-2455-4c74-9931-699668b137a8/extract-content/0.log" Dec 10 02:04:31 crc kubenswrapper[4884]: I1210 02:04:31.547235 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c95cd_d888450e-2455-4c74-9931-699668b137a8/extract-utilities/0.log" Dec 10 02:04:31 crc kubenswrapper[4884]: I1210 02:04:31.559794 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c95cd_d888450e-2455-4c74-9931-699668b137a8/extract-content/0.log" Dec 10 02:04:31 crc kubenswrapper[4884]: I1210 02:04:31.758422 4884 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lz2kk_2faf949b-c390-442b-8b2c-dc394506f3d4/extract-utilities/0.log" Dec 10 02:04:31 crc kubenswrapper[4884]: I1210 02:04:31.971801 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lz2kk_2faf949b-c390-442b-8b2c-dc394506f3d4/extract-utilities/0.log" Dec 10 02:04:32 crc kubenswrapper[4884]: I1210 02:04:32.003281 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lz2kk_2faf949b-c390-442b-8b2c-dc394506f3d4/extract-content/0.log" Dec 10 02:04:32 crc kubenswrapper[4884]: I1210 02:04:32.106662 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-c95cd_d888450e-2455-4c74-9931-699668b137a8/registry-server/0.log" Dec 10 02:04:32 crc kubenswrapper[4884]: I1210 02:04:32.183323 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lz2kk_2faf949b-c390-442b-8b2c-dc394506f3d4/extract-content/0.log" Dec 10 02:04:32 crc kubenswrapper[4884]: I1210 02:04:32.390076 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lz2kk_2faf949b-c390-442b-8b2c-dc394506f3d4/extract-utilities/0.log" Dec 10 02:04:32 crc kubenswrapper[4884]: I1210 02:04:32.444986 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lz2kk_2faf949b-c390-442b-8b2c-dc394506f3d4/extract-content/0.log" Dec 10 02:04:32 crc kubenswrapper[4884]: I1210 02:04:32.709017 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-7vf7m_218fb300-2f8e-4ba7-a947-a45ed8426678/registry-server/0.log" Dec 10 02:04:32 crc kubenswrapper[4884]: I1210 02:04:32.893396 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-24s97_fbd36c7b-a8d4-4812-a133-4a82a466dbc7/registry-server/0.log" Dec 10 02:04:33 crc kubenswrapper[4884]: I1210 02:04:33.301718 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-lz2kk_2faf949b-c390-442b-8b2c-dc394506f3d4/registry-server/0.log" Dec 10 02:04:43 crc kubenswrapper[4884]: E1210 02:04:43.289161 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:04:44 crc kubenswrapper[4884]: E1210 02:04:44.290228 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:04:48 crc kubenswrapper[4884]: I1210 02:04:48.098378 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 02:04:48 crc kubenswrapper[4884]: I1210 02:04:48.098874 4884 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 02:04:49 crc kubenswrapper[4884]: I1210 02:04:49.180315 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-jvfjm_ed228a3c-fc9a-4bb0-9517-3b9b7b7c76b8/prometheus-operator/0.log" Dec 10 02:04:49 crc kubenswrapper[4884]: I1210 02:04:49.348656 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-74d464f9dd-f2b95_0a9a5957-1f59-42ac-bc0d-3f3b494c4603/prometheus-operator-admission-webhook/0.log" Dec 10 02:04:49 crc kubenswrapper[4884]: I1210 02:04:49.376715 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-74d464f9dd-ngsth_88c45bd9-28e4-457c-aa6e-571ef793eda2/prometheus-operator-admission-webhook/0.log" Dec 10 02:04:49 crc kubenswrapper[4884]: I1210 02:04:49.525526 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-4pc64_965132f2-3d6b-4773-ac9a-9b1e964fb251/operator/0.log" Dec 10 02:04:49 crc kubenswrapper[4884]: I1210 02:04:49.587666 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-ui-dashboards-7d5fb4cbfb-dlcnl_7a2ff0cb-6085-4680-a330-aec1d9452896/observability-ui-dashboards/0.log" Dec 10 02:04:49 crc kubenswrapper[4884]: I1210 02:04:49.722385 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-qtpmp_5d755f81-3b23-4251-a7c1-6c2ab0a4695d/perses-operator/0.log" Dec 10 02:04:55 crc kubenswrapper[4884]: E1210 02:04:55.292317 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:04:58 crc kubenswrapper[4884]: E1210 02:04:58.289708 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:05:04 crc kubenswrapper[4884]: I1210 02:05:04.611421 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-887bbc66c-sjvbc_0495f56c-e0ff-4bb7-861c-21754379af3f/kube-rbac-proxy/0.log" Dec 10 02:05:04 crc kubenswrapper[4884]: I1210 02:05:04.621999 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators-redhat_loki-operator-controller-manager-887bbc66c-sjvbc_0495f56c-e0ff-4bb7-861c-21754379af3f/manager/0.log" Dec 10 02:05:06 crc kubenswrapper[4884]: E1210 02:05:06.289490 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" 
podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:05:11 crc kubenswrapper[4884]: E1210 02:05:11.290026 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:05:18 crc kubenswrapper[4884]: I1210 02:05:18.097888 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 02:05:18 crc kubenswrapper[4884]: I1210 02:05:18.098346 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 02:05:19 crc kubenswrapper[4884]: E1210 02:05:19.289155 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:05:26 crc kubenswrapper[4884]: E1210 02:05:26.288558 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:05:34 crc kubenswrapper[4884]: E1210 02:05:34.290090 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:05:41 crc kubenswrapper[4884]: E1210 02:05:41.291798 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.084862 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cshpj"] Dec 10 02:05:47 crc kubenswrapper[4884]: E1210 02:05:47.085932 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a301df2a-d84a-4216-b904-8691cab27c3e" containerName="extract-content" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.085949 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a301df2a-d84a-4216-b904-8691cab27c3e" containerName="extract-content" Dec 10 02:05:47 crc kubenswrapper[4884]: E1210 02:05:47.085983 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a301df2a-d84a-4216-b904-8691cab27c3e" 
containerName="registry-server" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.085991 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a301df2a-d84a-4216-b904-8691cab27c3e" containerName="registry-server" Dec 10 02:05:47 crc kubenswrapper[4884]: E1210 02:05:47.086015 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a301df2a-d84a-4216-b904-8691cab27c3e" containerName="extract-utilities" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.086023 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="a301df2a-d84a-4216-b904-8691cab27c3e" containerName="extract-utilities" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.086276 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="a301df2a-d84a-4216-b904-8691cab27c3e" containerName="registry-server" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.088258 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.100835 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cshpj"] Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.188402 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvgmq\" (UniqueName: \"kubernetes.io/projected/b3c30ed2-d9ff-42db-92c5-7690fc57147d-kube-api-access-tvgmq\") pod \"redhat-operators-cshpj\" (UID: \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\") " pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.188571 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c30ed2-d9ff-42db-92c5-7690fc57147d-utilities\") pod \"redhat-operators-cshpj\" (UID: \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\") " pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.188616 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c30ed2-d9ff-42db-92c5-7690fc57147d-catalog-content\") pod \"redhat-operators-cshpj\" (UID: \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\") " pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.295127 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c30ed2-d9ff-42db-92c5-7690fc57147d-utilities\") pod \"redhat-operators-cshpj\" (UID: \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\") " pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.295568 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c30ed2-d9ff-42db-92c5-7690fc57147d-catalog-content\") pod \"redhat-operators-cshpj\" (UID: \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\") " pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.295782 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvgmq\" (UniqueName: \"kubernetes.io/projected/b3c30ed2-d9ff-42db-92c5-7690fc57147d-kube-api-access-tvgmq\") pod \"redhat-operators-cshpj\" (UID: \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\") " 
pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.296073 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c30ed2-d9ff-42db-92c5-7690fc57147d-utilities\") pod \"redhat-operators-cshpj\" (UID: \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\") " pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.297410 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c30ed2-d9ff-42db-92c5-7690fc57147d-catalog-content\") pod \"redhat-operators-cshpj\" (UID: \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\") " pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.349380 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvgmq\" (UniqueName: \"kubernetes.io/projected/b3c30ed2-d9ff-42db-92c5-7690fc57147d-kube-api-access-tvgmq\") pod \"redhat-operators-cshpj\" (UID: \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\") " pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.416785 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:47 crc kubenswrapper[4884]: I1210 02:05:47.997082 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cshpj"] Dec 10 02:05:48 crc kubenswrapper[4884]: I1210 02:05:48.098610 4884 patch_prober.go:28] interesting pod/machine-config-daemon-8zcgx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 10 02:05:48 crc kubenswrapper[4884]: I1210 02:05:48.099321 4884 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 10 02:05:48 crc kubenswrapper[4884]: I1210 02:05:48.099585 4884 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" Dec 10 02:05:48 crc kubenswrapper[4884]: I1210 02:05:48.100525 4884 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804"} pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 10 02:05:48 crc kubenswrapper[4884]: I1210 02:05:48.100572 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerName="machine-config-daemon" containerID="cri-o://0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" gracePeriod=600 Dec 10 02:05:48 crc kubenswrapper[4884]: I1210 02:05:48.147144 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cshpj" 
event={"ID":"b3c30ed2-d9ff-42db-92c5-7690fc57147d","Type":"ContainerStarted","Data":"edf1dbfafc7537449bc278d9656bd17cb601a77d15301a05f96d818c1284b9a7"} Dec 10 02:05:48 crc kubenswrapper[4884]: E1210 02:05:48.224293 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.165500 4884 generic.go:334] "Generic (PLEG): container finished" podID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" exitCode=0 Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.165569 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerDied","Data":"0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804"} Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.166116 4884 scope.go:117] "RemoveContainer" containerID="e8f4eac03ccca0f0d31a1e5527a9ae76c203b8538de41b60f9d5802efc9a7452" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.166947 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:05:49 crc kubenswrapper[4884]: E1210 02:05:49.167354 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.169820 4884 generic.go:334] "Generic (PLEG): container finished" podID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" containerID="b97ddec97323014bb7d70a8326100097b5a8c102870be2738dd5d8c4e5362a37" exitCode=0 Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.169877 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cshpj" event={"ID":"b3c30ed2-d9ff-42db-92c5-7690fc57147d","Type":"ContainerDied","Data":"b97ddec97323014bb7d70a8326100097b5a8c102870be2738dd5d8c4e5362a37"} Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.280971 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5pvzp"] Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.283623 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:05:49 crc kubenswrapper[4884]: E1210 02:05:49.292129 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.314992 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5pvzp"] Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.445386 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/103435a6-0b32-4643-a1fb-ab5d83889dbe-catalog-content\") pod \"certified-operators-5pvzp\" (UID: \"103435a6-0b32-4643-a1fb-ab5d83889dbe\") " pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.445476 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmxb2\" (UniqueName: \"kubernetes.io/projected/103435a6-0b32-4643-a1fb-ab5d83889dbe-kube-api-access-tmxb2\") pod \"certified-operators-5pvzp\" (UID: \"103435a6-0b32-4643-a1fb-ab5d83889dbe\") " pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.445516 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/103435a6-0b32-4643-a1fb-ab5d83889dbe-utilities\") pod \"certified-operators-5pvzp\" (UID: \"103435a6-0b32-4643-a1fb-ab5d83889dbe\") " pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.547279 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/103435a6-0b32-4643-a1fb-ab5d83889dbe-catalog-content\") pod \"certified-operators-5pvzp\" (UID: \"103435a6-0b32-4643-a1fb-ab5d83889dbe\") " pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.547358 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmxb2\" (UniqueName: \"kubernetes.io/projected/103435a6-0b32-4643-a1fb-ab5d83889dbe-kube-api-access-tmxb2\") pod \"certified-operators-5pvzp\" (UID: \"103435a6-0b32-4643-a1fb-ab5d83889dbe\") " pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.547451 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/103435a6-0b32-4643-a1fb-ab5d83889dbe-utilities\") pod \"certified-operators-5pvzp\" (UID: \"103435a6-0b32-4643-a1fb-ab5d83889dbe\") " pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.548115 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/103435a6-0b32-4643-a1fb-ab5d83889dbe-catalog-content\") pod \"certified-operators-5pvzp\" (UID: \"103435a6-0b32-4643-a1fb-ab5d83889dbe\") " pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.548211 4884 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/103435a6-0b32-4643-a1fb-ab5d83889dbe-utilities\") pod \"certified-operators-5pvzp\" (UID: \"103435a6-0b32-4643-a1fb-ab5d83889dbe\") " pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.569667 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmxb2\" (UniqueName: \"kubernetes.io/projected/103435a6-0b32-4643-a1fb-ab5d83889dbe-kube-api-access-tmxb2\") pod \"certified-operators-5pvzp\" (UID: \"103435a6-0b32-4643-a1fb-ab5d83889dbe\") " pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:05:49 crc kubenswrapper[4884]: I1210 02:05:49.607972 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:05:50 crc kubenswrapper[4884]: I1210 02:05:50.156384 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5pvzp"] Dec 10 02:05:50 crc kubenswrapper[4884]: W1210 02:05:50.162788 4884 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod103435a6_0b32_4643_a1fb_ab5d83889dbe.slice/crio-09087c4f985c577a2297842c7df3e7c9a67953966fca2b797f4b02278fdcfc18 WatchSource:0}: Error finding container 09087c4f985c577a2297842c7df3e7c9a67953966fca2b797f4b02278fdcfc18: Status 404 returned error can't find the container with id 09087c4f985c577a2297842c7df3e7c9a67953966fca2b797f4b02278fdcfc18 Dec 10 02:05:50 crc kubenswrapper[4884]: I1210 02:05:50.184590 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pvzp" event={"ID":"103435a6-0b32-4643-a1fb-ab5d83889dbe","Type":"ContainerStarted","Data":"09087c4f985c577a2297842c7df3e7c9a67953966fca2b797f4b02278fdcfc18"} Dec 10 02:05:51 crc kubenswrapper[4884]: I1210 02:05:51.206511 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cshpj" event={"ID":"b3c30ed2-d9ff-42db-92c5-7690fc57147d","Type":"ContainerStarted","Data":"373feb2ca7ee44da34c8cbb85db29b9a8da54e740c6eab50ca9d0f1b5fb7ef90"} Dec 10 02:05:51 crc kubenswrapper[4884]: I1210 02:05:51.209210 4884 generic.go:334] "Generic (PLEG): container finished" podID="103435a6-0b32-4643-a1fb-ab5d83889dbe" containerID="4d214faaa8f5a30485350e8347421fc713a6130d1ecc62ff4ca1379a7faf74c3" exitCode=0 Dec 10 02:05:51 crc kubenswrapper[4884]: I1210 02:05:51.209300 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pvzp" event={"ID":"103435a6-0b32-4643-a1fb-ab5d83889dbe","Type":"ContainerDied","Data":"4d214faaa8f5a30485350e8347421fc713a6130d1ecc62ff4ca1379a7faf74c3"} Dec 10 02:05:52 crc kubenswrapper[4884]: E1210 02:05:52.289354 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:05:53 crc kubenswrapper[4884]: I1210 02:05:53.230301 4884 generic.go:334] "Generic (PLEG): container finished" podID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" containerID="373feb2ca7ee44da34c8cbb85db29b9a8da54e740c6eab50ca9d0f1b5fb7ef90" exitCode=0 Dec 10 02:05:53 crc kubenswrapper[4884]: I1210 
02:05:53.230880 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cshpj" event={"ID":"b3c30ed2-d9ff-42db-92c5-7690fc57147d","Type":"ContainerDied","Data":"373feb2ca7ee44da34c8cbb85db29b9a8da54e740c6eab50ca9d0f1b5fb7ef90"} Dec 10 02:05:53 crc kubenswrapper[4884]: I1210 02:05:53.234808 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pvzp" event={"ID":"103435a6-0b32-4643-a1fb-ab5d83889dbe","Type":"ContainerStarted","Data":"68006ac4fa818621e3f9dc9a050dc9df1db88fde8cfe81dbfac93cf99742a4b9"} Dec 10 02:05:55 crc kubenswrapper[4884]: I1210 02:05:55.272606 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cshpj" event={"ID":"b3c30ed2-d9ff-42db-92c5-7690fc57147d","Type":"ContainerStarted","Data":"916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738"} Dec 10 02:05:55 crc kubenswrapper[4884]: I1210 02:05:55.300952 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cshpj" podStartSLOduration=3.437253208 podStartE2EDuration="8.30092618s" podCreationTimestamp="2025-12-10 02:05:47 +0000 UTC" firstStartedPulling="2025-12-10 02:05:49.171953015 +0000 UTC m=+5722.249910172" lastFinishedPulling="2025-12-10 02:05:54.035626027 +0000 UTC m=+5727.113583144" observedRunningTime="2025-12-10 02:05:55.293651585 +0000 UTC m=+5728.371608732" watchObservedRunningTime="2025-12-10 02:05:55.30092618 +0000 UTC m=+5728.378883307" Dec 10 02:05:56 crc kubenswrapper[4884]: I1210 02:05:56.297787 4884 generic.go:334] "Generic (PLEG): container finished" podID="103435a6-0b32-4643-a1fb-ab5d83889dbe" containerID="68006ac4fa818621e3f9dc9a050dc9df1db88fde8cfe81dbfac93cf99742a4b9" exitCode=0 Dec 10 02:05:56 crc kubenswrapper[4884]: I1210 02:05:56.297910 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pvzp" event={"ID":"103435a6-0b32-4643-a1fb-ab5d83889dbe","Type":"ContainerDied","Data":"68006ac4fa818621e3f9dc9a050dc9df1db88fde8cfe81dbfac93cf99742a4b9"} Dec 10 02:05:57 crc kubenswrapper[4884]: I1210 02:05:57.416927 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:57 crc kubenswrapper[4884]: I1210 02:05:57.417194 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:05:58 crc kubenswrapper[4884]: I1210 02:05:58.328452 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pvzp" event={"ID":"103435a6-0b32-4643-a1fb-ab5d83889dbe","Type":"ContainerStarted","Data":"5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf"} Dec 10 02:05:58 crc kubenswrapper[4884]: I1210 02:05:58.360625 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5pvzp" podStartSLOduration=2.916076228 podStartE2EDuration="9.360607888s" podCreationTimestamp="2025-12-10 02:05:49 +0000 UTC" firstStartedPulling="2025-12-10 02:05:51.212480154 +0000 UTC m=+5724.290437301" lastFinishedPulling="2025-12-10 02:05:57.657011844 +0000 UTC m=+5730.734968961" observedRunningTime="2025-12-10 02:05:58.348309928 +0000 UTC m=+5731.426267065" watchObservedRunningTime="2025-12-10 02:05:58.360607888 +0000 UTC m=+5731.438565005" Dec 10 02:05:58 crc kubenswrapper[4884]: I1210 02:05:58.461465 4884 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-marketplace/redhat-operators-cshpj" podUID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" containerName="registry-server" probeResult="failure" output=< Dec 10 02:05:58 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 02:05:58 crc kubenswrapper[4884]: > Dec 10 02:05:59 crc kubenswrapper[4884]: I1210 02:05:59.608135 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:05:59 crc kubenswrapper[4884]: I1210 02:05:59.608502 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:06:00 crc kubenswrapper[4884]: I1210 02:06:00.665243 4884 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-5pvzp" podUID="103435a6-0b32-4643-a1fb-ab5d83889dbe" containerName="registry-server" probeResult="failure" output=< Dec 10 02:06:00 crc kubenswrapper[4884]: timeout: failed to connect service ":50051" within 1s Dec 10 02:06:00 crc kubenswrapper[4884]: > Dec 10 02:06:02 crc kubenswrapper[4884]: I1210 02:06:02.287354 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:06:02 crc kubenswrapper[4884]: E1210 02:06:02.288793 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:06:04 crc kubenswrapper[4884]: E1210 02:06:04.289160 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:06:06 crc kubenswrapper[4884]: E1210 02:06:06.293853 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:06:07 crc kubenswrapper[4884]: I1210 02:06:07.502325 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:06:07 crc kubenswrapper[4884]: I1210 02:06:07.579425 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:06:07 crc kubenswrapper[4884]: I1210 02:06:07.756271 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cshpj"] Dec 10 02:06:09 crc kubenswrapper[4884]: I1210 02:06:09.459027 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cshpj" podUID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" containerName="registry-server" containerID="cri-o://916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738" gracePeriod=2 Dec 10 02:06:09 crc kubenswrapper[4884]: I1210 
02:06:09.680617 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:06:09 crc kubenswrapper[4884]: I1210 02:06:09.745543 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.013084 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.043607 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c30ed2-d9ff-42db-92c5-7690fc57147d-utilities\") pod \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\" (UID: \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\") " Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.043703 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvgmq\" (UniqueName: \"kubernetes.io/projected/b3c30ed2-d9ff-42db-92c5-7690fc57147d-kube-api-access-tvgmq\") pod \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\" (UID: \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\") " Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.043906 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c30ed2-d9ff-42db-92c5-7690fc57147d-catalog-content\") pod \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\" (UID: \"b3c30ed2-d9ff-42db-92c5-7690fc57147d\") " Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.044478 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3c30ed2-d9ff-42db-92c5-7690fc57147d-utilities" (OuterVolumeSpecName: "utilities") pod "b3c30ed2-d9ff-42db-92c5-7690fc57147d" (UID: "b3c30ed2-d9ff-42db-92c5-7690fc57147d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.044721 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c30ed2-d9ff-42db-92c5-7690fc57147d-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.055552 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3c30ed2-d9ff-42db-92c5-7690fc57147d-kube-api-access-tvgmq" (OuterVolumeSpecName: "kube-api-access-tvgmq") pod "b3c30ed2-d9ff-42db-92c5-7690fc57147d" (UID: "b3c30ed2-d9ff-42db-92c5-7690fc57147d"). InnerVolumeSpecName "kube-api-access-tvgmq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.146793 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvgmq\" (UniqueName: \"kubernetes.io/projected/b3c30ed2-d9ff-42db-92c5-7690fc57147d-kube-api-access-tvgmq\") on node \"crc\" DevicePath \"\"" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.157845 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5pvzp"] Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.183645 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3c30ed2-d9ff-42db-92c5-7690fc57147d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b3c30ed2-d9ff-42db-92c5-7690fc57147d" (UID: "b3c30ed2-d9ff-42db-92c5-7690fc57147d"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.252687 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c30ed2-d9ff-42db-92c5-7690fc57147d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.476339 4884 generic.go:334] "Generic (PLEG): container finished" podID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" containerID="916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738" exitCode=0 Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.476488 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cshpj" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.476489 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cshpj" event={"ID":"b3c30ed2-d9ff-42db-92c5-7690fc57147d","Type":"ContainerDied","Data":"916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738"} Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.476585 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cshpj" event={"ID":"b3c30ed2-d9ff-42db-92c5-7690fc57147d","Type":"ContainerDied","Data":"edf1dbfafc7537449bc278d9656bd17cb601a77d15301a05f96d818c1284b9a7"} Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.476634 4884 scope.go:117] "RemoveContainer" containerID="916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.517618 4884 scope.go:117] "RemoveContainer" containerID="373feb2ca7ee44da34c8cbb85db29b9a8da54e740c6eab50ca9d0f1b5fb7ef90" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.541290 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cshpj"] Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.555464 4884 scope.go:117] "RemoveContainer" containerID="b97ddec97323014bb7d70a8326100097b5a8c102870be2738dd5d8c4e5362a37" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.559242 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cshpj"] Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.614151 4884 scope.go:117] "RemoveContainer" containerID="916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738" Dec 10 02:06:10 crc kubenswrapper[4884]: E1210 02:06:10.614768 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738\": container with ID starting with 916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738 not found: ID does not exist" containerID="916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.614812 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738"} err="failed to get container status \"916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738\": rpc error: code = NotFound desc = could not find container \"916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738\": container with ID starting with 916969ccd67db3636209903fe0c2e76467f68a13e7de1e85e91d697e49d69738 not found: ID 
does not exist" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.614847 4884 scope.go:117] "RemoveContainer" containerID="373feb2ca7ee44da34c8cbb85db29b9a8da54e740c6eab50ca9d0f1b5fb7ef90" Dec 10 02:06:10 crc kubenswrapper[4884]: E1210 02:06:10.615504 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"373feb2ca7ee44da34c8cbb85db29b9a8da54e740c6eab50ca9d0f1b5fb7ef90\": container with ID starting with 373feb2ca7ee44da34c8cbb85db29b9a8da54e740c6eab50ca9d0f1b5fb7ef90 not found: ID does not exist" containerID="373feb2ca7ee44da34c8cbb85db29b9a8da54e740c6eab50ca9d0f1b5fb7ef90" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.615543 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"373feb2ca7ee44da34c8cbb85db29b9a8da54e740c6eab50ca9d0f1b5fb7ef90"} err="failed to get container status \"373feb2ca7ee44da34c8cbb85db29b9a8da54e740c6eab50ca9d0f1b5fb7ef90\": rpc error: code = NotFound desc = could not find container \"373feb2ca7ee44da34c8cbb85db29b9a8da54e740c6eab50ca9d0f1b5fb7ef90\": container with ID starting with 373feb2ca7ee44da34c8cbb85db29b9a8da54e740c6eab50ca9d0f1b5fb7ef90 not found: ID does not exist" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.615565 4884 scope.go:117] "RemoveContainer" containerID="b97ddec97323014bb7d70a8326100097b5a8c102870be2738dd5d8c4e5362a37" Dec 10 02:06:10 crc kubenswrapper[4884]: E1210 02:06:10.615928 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b97ddec97323014bb7d70a8326100097b5a8c102870be2738dd5d8c4e5362a37\": container with ID starting with b97ddec97323014bb7d70a8326100097b5a8c102870be2738dd5d8c4e5362a37 not found: ID does not exist" containerID="b97ddec97323014bb7d70a8326100097b5a8c102870be2738dd5d8c4e5362a37" Dec 10 02:06:10 crc kubenswrapper[4884]: I1210 02:06:10.615968 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b97ddec97323014bb7d70a8326100097b5a8c102870be2738dd5d8c4e5362a37"} err="failed to get container status \"b97ddec97323014bb7d70a8326100097b5a8c102870be2738dd5d8c4e5362a37\": rpc error: code = NotFound desc = could not find container \"b97ddec97323014bb7d70a8326100097b5a8c102870be2738dd5d8c4e5362a37\": container with ID starting with b97ddec97323014bb7d70a8326100097b5a8c102870be2738dd5d8c4e5362a37 not found: ID does not exist" Dec 10 02:06:11 crc kubenswrapper[4884]: I1210 02:06:11.307877 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" path="/var/lib/kubelet/pods/b3c30ed2-d9ff-42db-92c5-7690fc57147d/volumes" Dec 10 02:06:11 crc kubenswrapper[4884]: I1210 02:06:11.491621 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5pvzp" podUID="103435a6-0b32-4643-a1fb-ab5d83889dbe" containerName="registry-server" containerID="cri-o://5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf" gracePeriod=2 Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.070099 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.204556 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmxb2\" (UniqueName: \"kubernetes.io/projected/103435a6-0b32-4643-a1fb-ab5d83889dbe-kube-api-access-tmxb2\") pod \"103435a6-0b32-4643-a1fb-ab5d83889dbe\" (UID: \"103435a6-0b32-4643-a1fb-ab5d83889dbe\") " Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.204757 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/103435a6-0b32-4643-a1fb-ab5d83889dbe-catalog-content\") pod \"103435a6-0b32-4643-a1fb-ab5d83889dbe\" (UID: \"103435a6-0b32-4643-a1fb-ab5d83889dbe\") " Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.204779 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/103435a6-0b32-4643-a1fb-ab5d83889dbe-utilities\") pod \"103435a6-0b32-4643-a1fb-ab5d83889dbe\" (UID: \"103435a6-0b32-4643-a1fb-ab5d83889dbe\") " Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.205929 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/103435a6-0b32-4643-a1fb-ab5d83889dbe-utilities" (OuterVolumeSpecName: "utilities") pod "103435a6-0b32-4643-a1fb-ab5d83889dbe" (UID: "103435a6-0b32-4643-a1fb-ab5d83889dbe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.213059 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/103435a6-0b32-4643-a1fb-ab5d83889dbe-kube-api-access-tmxb2" (OuterVolumeSpecName: "kube-api-access-tmxb2") pod "103435a6-0b32-4643-a1fb-ab5d83889dbe" (UID: "103435a6-0b32-4643-a1fb-ab5d83889dbe"). InnerVolumeSpecName "kube-api-access-tmxb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.250270 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/103435a6-0b32-4643-a1fb-ab5d83889dbe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "103435a6-0b32-4643-a1fb-ab5d83889dbe" (UID: "103435a6-0b32-4643-a1fb-ab5d83889dbe"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.306911 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/103435a6-0b32-4643-a1fb-ab5d83889dbe-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.306958 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/103435a6-0b32-4643-a1fb-ab5d83889dbe-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.306971 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmxb2\" (UniqueName: \"kubernetes.io/projected/103435a6-0b32-4643-a1fb-ab5d83889dbe-kube-api-access-tmxb2\") on node \"crc\" DevicePath \"\"" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.508934 4884 generic.go:334] "Generic (PLEG): container finished" podID="103435a6-0b32-4643-a1fb-ab5d83889dbe" containerID="5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf" exitCode=0 Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.508987 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pvzp" event={"ID":"103435a6-0b32-4643-a1fb-ab5d83889dbe","Type":"ContainerDied","Data":"5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf"} Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.509064 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5pvzp" event={"ID":"103435a6-0b32-4643-a1fb-ab5d83889dbe","Type":"ContainerDied","Data":"09087c4f985c577a2297842c7df3e7c9a67953966fca2b797f4b02278fdcfc18"} Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.509120 4884 scope.go:117] "RemoveContainer" containerID="5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.509145 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5pvzp" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.562125 4884 scope.go:117] "RemoveContainer" containerID="68006ac4fa818621e3f9dc9a050dc9df1db88fde8cfe81dbfac93cf99742a4b9" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.577627 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5pvzp"] Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.580842 4884 scope.go:117] "RemoveContainer" containerID="4d214faaa8f5a30485350e8347421fc713a6130d1ecc62ff4ca1379a7faf74c3" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.587894 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5pvzp"] Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.651107 4884 scope.go:117] "RemoveContainer" containerID="5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf" Dec 10 02:06:12 crc kubenswrapper[4884]: E1210 02:06:12.652677 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf\": container with ID starting with 5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf not found: ID does not exist" containerID="5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.652719 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf"} err="failed to get container status \"5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf\": rpc error: code = NotFound desc = could not find container \"5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf\": container with ID starting with 5e8346a927bf0b64e0df4a72d14b728fa76858c9b3c229fa8324bf239d3cb9bf not found: ID does not exist" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.652744 4884 scope.go:117] "RemoveContainer" containerID="68006ac4fa818621e3f9dc9a050dc9df1db88fde8cfe81dbfac93cf99742a4b9" Dec 10 02:06:12 crc kubenswrapper[4884]: E1210 02:06:12.653739 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68006ac4fa818621e3f9dc9a050dc9df1db88fde8cfe81dbfac93cf99742a4b9\": container with ID starting with 68006ac4fa818621e3f9dc9a050dc9df1db88fde8cfe81dbfac93cf99742a4b9 not found: ID does not exist" containerID="68006ac4fa818621e3f9dc9a050dc9df1db88fde8cfe81dbfac93cf99742a4b9" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.653767 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68006ac4fa818621e3f9dc9a050dc9df1db88fde8cfe81dbfac93cf99742a4b9"} err="failed to get container status \"68006ac4fa818621e3f9dc9a050dc9df1db88fde8cfe81dbfac93cf99742a4b9\": rpc error: code = NotFound desc = could not find container \"68006ac4fa818621e3f9dc9a050dc9df1db88fde8cfe81dbfac93cf99742a4b9\": container with ID starting with 68006ac4fa818621e3f9dc9a050dc9df1db88fde8cfe81dbfac93cf99742a4b9 not found: ID does not exist" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.653782 4884 scope.go:117] "RemoveContainer" containerID="4d214faaa8f5a30485350e8347421fc713a6130d1ecc62ff4ca1379a7faf74c3" Dec 10 02:06:12 crc kubenswrapper[4884]: E1210 02:06:12.654245 4884 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"4d214faaa8f5a30485350e8347421fc713a6130d1ecc62ff4ca1379a7faf74c3\": container with ID starting with 4d214faaa8f5a30485350e8347421fc713a6130d1ecc62ff4ca1379a7faf74c3 not found: ID does not exist" containerID="4d214faaa8f5a30485350e8347421fc713a6130d1ecc62ff4ca1379a7faf74c3" Dec 10 02:06:12 crc kubenswrapper[4884]: I1210 02:06:12.654267 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d214faaa8f5a30485350e8347421fc713a6130d1ecc62ff4ca1379a7faf74c3"} err="failed to get container status \"4d214faaa8f5a30485350e8347421fc713a6130d1ecc62ff4ca1379a7faf74c3\": rpc error: code = NotFound desc = could not find container \"4d214faaa8f5a30485350e8347421fc713a6130d1ecc62ff4ca1379a7faf74c3\": container with ID starting with 4d214faaa8f5a30485350e8347421fc713a6130d1ecc62ff4ca1379a7faf74c3 not found: ID does not exist" Dec 10 02:06:13 crc kubenswrapper[4884]: I1210 02:06:13.302333 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="103435a6-0b32-4643-a1fb-ab5d83889dbe" path="/var/lib/kubelet/pods/103435a6-0b32-4643-a1fb-ab5d83889dbe/volumes" Dec 10 02:06:14 crc kubenswrapper[4884]: I1210 02:06:14.287862 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:06:14 crc kubenswrapper[4884]: E1210 02:06:14.288322 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:06:17 crc kubenswrapper[4884]: I1210 02:06:17.305997 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 02:06:17 crc kubenswrapper[4884]: E1210 02:06:17.426626 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 02:06:17 crc kubenswrapper[4884]: E1210 02:06:17.426708 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 02:06:17 crc kubenswrapper[4884]: E1210 02:06:17.426897 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 02:06:17 crc kubenswrapper[4884]: E1210 02:06:17.428250 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:06:18 crc kubenswrapper[4884]: E1210 02:06:18.290801 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:06:29 crc kubenswrapper[4884]: I1210 02:06:29.290412 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:06:29 crc kubenswrapper[4884]: E1210 02:06:29.291574 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:06:30 crc kubenswrapper[4884]: E1210 02:06:30.289788 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:06:32 crc kubenswrapper[4884]: E1210 02:06:32.292140 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:06:40 crc kubenswrapper[4884]: E1210 02:06:40.848781 4884 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6999dba9_b93d_4a66_9dcf_bf46e0a7f37e.slice/crio-conmon-46f05ebeabcb55bbaefc282df0564fb9c3b45a8903ed8181e18c14af7153072c.scope\": RecentStats: unable to find data in memory cache]" Dec 10 02:06:40 crc kubenswrapper[4884]: I1210 02:06:40.886687 4884 generic.go:334] "Generic (PLEG): container finished" podID="6999dba9-b93d-4a66-9dcf-bf46e0a7f37e" containerID="46f05ebeabcb55bbaefc282df0564fb9c3b45a8903ed8181e18c14af7153072c" exitCode=0 Dec 10 02:06:40 crc kubenswrapper[4884]: I1210 02:06:40.886799 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-5p69z/must-gather-kgx8d" event={"ID":"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e","Type":"ContainerDied","Data":"46f05ebeabcb55bbaefc282df0564fb9c3b45a8903ed8181e18c14af7153072c"} Dec 10 02:06:40 crc kubenswrapper[4884]: I1210 02:06:40.888704 4884 scope.go:117] "RemoveContainer" containerID="46f05ebeabcb55bbaefc282df0564fb9c3b45a8903ed8181e18c14af7153072c" Dec 10 02:06:41 crc kubenswrapper[4884]: I1210 02:06:41.287230 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:06:41 crc kubenswrapper[4884]: E1210 02:06:41.287925 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:06:41 crc kubenswrapper[4884]: I1210 02:06:41.862357 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-5p69z_must-gather-kgx8d_6999dba9-b93d-4a66-9dcf-bf46e0a7f37e/gather/0.log" Dec 10 02:06:44 crc kubenswrapper[4884]: E1210 02:06:44.111171 4884 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.58:49470->38.102.83.58:43497: write tcp 38.102.83.58:49470->38.102.83.58:43497: write: broken pipe Dec 10 02:06:44 crc kubenswrapper[4884]: I1210 02:06:44.628530 4884 scope.go:117] "RemoveContainer" containerID="c8d259b5cf83d4fe23cf47d12b55735c4fd7d0c4dd1855063c98e60ba2d5941d" Dec 10 02:06:45 crc kubenswrapper[4884]: E1210 02:06:45.406633 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 02:06:45 crc kubenswrapper[4884]: E1210 02:06:45.407174 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 02:06:45 crc kubenswrapper[4884]: E1210 02:06:45.407375 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 02:06:45 crc kubenswrapper[4884]: E1210 02:06:45.408659 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:06:47 crc kubenswrapper[4884]: E1210 02:06:47.297778 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:06:49 crc kubenswrapper[4884]: I1210 02:06:49.879574 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-5p69z/must-gather-kgx8d"] Dec 10 02:06:49 crc kubenswrapper[4884]: I1210 02:06:49.880729 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-5p69z/must-gather-kgx8d" podUID="6999dba9-b93d-4a66-9dcf-bf46e0a7f37e" containerName="copy" containerID="cri-o://ab72a066be90c2693124223d0a94733371e0d0c2adcf865cf72287bc33721dc1" gracePeriod=2 Dec 10 02:06:49 crc kubenswrapper[4884]: I1210 02:06:49.895111 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-5p69z/must-gather-kgx8d"] Dec 10 02:06:51 crc kubenswrapper[4884]: I1210 02:06:51.070945 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-5p69z_must-gather-kgx8d_6999dba9-b93d-4a66-9dcf-bf46e0a7f37e/copy/0.log" Dec 10 02:06:51 crc kubenswrapper[4884]: I1210 02:06:51.071706 4884 generic.go:334] "Generic (PLEG): container finished" podID="6999dba9-b93d-4a66-9dcf-bf46e0a7f37e" containerID="ab72a066be90c2693124223d0a94733371e0d0c2adcf865cf72287bc33721dc1" exitCode=143 Dec 10 02:06:51 crc kubenswrapper[4884]: I1210 02:06:51.252289 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-5p69z_must-gather-kgx8d_6999dba9-b93d-4a66-9dcf-bf46e0a7f37e/copy/0.log" Dec 10 02:06:51 crc kubenswrapper[4884]: I1210 02:06:51.253157 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5p69z/must-gather-kgx8d" Dec 10 02:06:51 crc kubenswrapper[4884]: I1210 02:06:51.360742 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9gz5\" (UniqueName: \"kubernetes.io/projected/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e-kube-api-access-b9gz5\") pod \"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e\" (UID: \"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e\") " Dec 10 02:06:51 crc kubenswrapper[4884]: I1210 02:06:51.360831 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e-must-gather-output\") pod \"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e\" (UID: \"6999dba9-b93d-4a66-9dcf-bf46e0a7f37e\") " Dec 10 02:06:51 crc kubenswrapper[4884]: I1210 02:06:51.374716 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e-kube-api-access-b9gz5" (OuterVolumeSpecName: "kube-api-access-b9gz5") pod "6999dba9-b93d-4a66-9dcf-bf46e0a7f37e" (UID: "6999dba9-b93d-4a66-9dcf-bf46e0a7f37e"). InnerVolumeSpecName "kube-api-access-b9gz5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 02:06:51 crc kubenswrapper[4884]: I1210 02:06:51.464162 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9gz5\" (UniqueName: \"kubernetes.io/projected/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e-kube-api-access-b9gz5\") on node \"crc\" DevicePath \"\"" Dec 10 02:06:51 crc kubenswrapper[4884]: I1210 02:06:51.542958 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "6999dba9-b93d-4a66-9dcf-bf46e0a7f37e" (UID: "6999dba9-b93d-4a66-9dcf-bf46e0a7f37e"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 02:06:51 crc kubenswrapper[4884]: I1210 02:06:51.567842 4884 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e-must-gather-output\") on node \"crc\" DevicePath \"\"" Dec 10 02:06:52 crc kubenswrapper[4884]: I1210 02:06:52.088307 4884 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-5p69z_must-gather-kgx8d_6999dba9-b93d-4a66-9dcf-bf46e0a7f37e/copy/0.log" Dec 10 02:06:52 crc kubenswrapper[4884]: I1210 02:06:52.088965 4884 scope.go:117] "RemoveContainer" containerID="ab72a066be90c2693124223d0a94733371e0d0c2adcf865cf72287bc33721dc1" Dec 10 02:06:52 crc kubenswrapper[4884]: I1210 02:06:52.089172 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-5p69z/must-gather-kgx8d" Dec 10 02:06:52 crc kubenswrapper[4884]: I1210 02:06:52.129946 4884 scope.go:117] "RemoveContainer" containerID="46f05ebeabcb55bbaefc282df0564fb9c3b45a8903ed8181e18c14af7153072c" Dec 10 02:06:53 crc kubenswrapper[4884]: I1210 02:06:53.306209 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6999dba9-b93d-4a66-9dcf-bf46e0a7f37e" path="/var/lib/kubelet/pods/6999dba9-b93d-4a66-9dcf-bf46e0a7f37e/volumes" Dec 10 02:06:54 crc kubenswrapper[4884]: I1210 02:06:54.287531 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:06:54 crc kubenswrapper[4884]: E1210 02:06:54.287867 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:06:59 crc kubenswrapper[4884]: E1210 02:06:59.290560 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:07:01 crc kubenswrapper[4884]: E1210 02:07:01.289006 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" 
podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:07:08 crc kubenswrapper[4884]: I1210 02:07:08.286813 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:07:08 crc kubenswrapper[4884]: E1210 02:07:08.287982 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:07:11 crc kubenswrapper[4884]: E1210 02:07:11.292650 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:07:14 crc kubenswrapper[4884]: E1210 02:07:14.290269 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:07:19 crc kubenswrapper[4884]: I1210 02:07:19.288593 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:07:19 crc kubenswrapper[4884]: E1210 02:07:19.289921 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:07:26 crc kubenswrapper[4884]: E1210 02:07:26.289732 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:07:29 crc kubenswrapper[4884]: E1210 02:07:29.292136 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:07:30 crc kubenswrapper[4884]: I1210 02:07:30.287513 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:07:30 crc kubenswrapper[4884]: E1210 02:07:30.287907 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:07:41 crc kubenswrapper[4884]: I1210 02:07:41.288274 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:07:41 crc kubenswrapper[4884]: E1210 02:07:41.289652 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:07:41 crc kubenswrapper[4884]: E1210 02:07:41.290488 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:07:42 crc kubenswrapper[4884]: E1210 02:07:42.292162 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:07:53 crc kubenswrapper[4884]: I1210 02:07:53.287133 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:07:53 crc kubenswrapper[4884]: E1210 02:07:53.288197 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:07:55 crc kubenswrapper[4884]: E1210 02:07:55.303664 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:07:56 crc kubenswrapper[4884]: E1210 02:07:56.290318 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:08:04 crc kubenswrapper[4884]: I1210 02:08:04.288580 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:08:04 crc kubenswrapper[4884]: E1210 02:08:04.289343 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:08:06 crc kubenswrapper[4884]: E1210 02:08:06.289593 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:08:11 crc kubenswrapper[4884]: E1210 02:08:11.290944 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:08:17 crc kubenswrapper[4884]: I1210 02:08:17.306540 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:08:17 crc kubenswrapper[4884]: E1210 02:08:17.307665 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:08:20 crc kubenswrapper[4884]: E1210 02:08:20.292868 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:08:23 crc kubenswrapper[4884]: E1210 02:08:23.292276 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:08:32 crc kubenswrapper[4884]: I1210 02:08:32.288785 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:08:32 crc kubenswrapper[4884]: E1210 02:08:32.290398 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:08:34 crc kubenswrapper[4884]: E1210 02:08:34.293592 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:08:35 crc kubenswrapper[4884]: E1210 02:08:35.294859 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:08:47 crc kubenswrapper[4884]: I1210 02:08:47.287128 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:08:47 crc kubenswrapper[4884]: E1210 02:08:47.287826 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:08:47 crc kubenswrapper[4884]: E1210 02:08:47.289604 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:08:48 crc kubenswrapper[4884]: E1210 02:08:48.289701 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:08:58 crc kubenswrapper[4884]: E1210 02:08:58.290311 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:09:00 crc kubenswrapper[4884]: E1210 02:09:00.288598 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:09:02 crc kubenswrapper[4884]: I1210 02:09:02.287808 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:09:02 crc kubenswrapper[4884]: E1210 02:09:02.288790 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:09:09 
crc kubenswrapper[4884]: E1210 02:09:09.291008 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:09:13 crc kubenswrapper[4884]: E1210 02:09:13.290418 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:09:16 crc kubenswrapper[4884]: I1210 02:09:16.287224 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:09:16 crc kubenswrapper[4884]: E1210 02:09:16.288278 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:09:20 crc kubenswrapper[4884]: E1210 02:09:20.289194 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:09:24 crc kubenswrapper[4884]: E1210 02:09:24.292137 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:09:27 crc kubenswrapper[4884]: I1210 02:09:27.308692 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:09:27 crc kubenswrapper[4884]: E1210 02:09:27.309239 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:09:32 crc kubenswrapper[4884]: E1210 02:09:32.292377 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:09:35 crc kubenswrapper[4884]: E1210 02:09:35.289636 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off 
pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:09:39 crc kubenswrapper[4884]: I1210 02:09:39.287730 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:09:39 crc kubenswrapper[4884]: E1210 02:09:39.289099 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:09:46 crc kubenswrapper[4884]: E1210 02:09:46.290421 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:09:47 crc kubenswrapper[4884]: E1210 02:09:47.340215 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.685664 4884 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jz5xm"] Dec 10 02:09:48 crc kubenswrapper[4884]: E1210 02:09:48.686387 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="103435a6-0b32-4643-a1fb-ab5d83889dbe" containerName="extract-content" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.686403 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="103435a6-0b32-4643-a1fb-ab5d83889dbe" containerName="extract-content" Dec 10 02:09:48 crc kubenswrapper[4884]: E1210 02:09:48.686422 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6999dba9-b93d-4a66-9dcf-bf46e0a7f37e" containerName="gather" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.686453 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6999dba9-b93d-4a66-9dcf-bf46e0a7f37e" containerName="gather" Dec 10 02:09:48 crc kubenswrapper[4884]: E1210 02:09:48.686466 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6999dba9-b93d-4a66-9dcf-bf46e0a7f37e" containerName="copy" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.686476 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="6999dba9-b93d-4a66-9dcf-bf46e0a7f37e" containerName="copy" Dec 10 02:09:48 crc kubenswrapper[4884]: E1210 02:09:48.686485 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="103435a6-0b32-4643-a1fb-ab5d83889dbe" containerName="registry-server" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.686494 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="103435a6-0b32-4643-a1fb-ab5d83889dbe" containerName="registry-server" Dec 10 02:09:48 crc kubenswrapper[4884]: E1210 02:09:48.686521 4884 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" containerName="extract-content" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.686529 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" containerName="extract-content" Dec 10 02:09:48 crc kubenswrapper[4884]: E1210 02:09:48.686547 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" containerName="registry-server" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.686555 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" containerName="registry-server" Dec 10 02:09:48 crc kubenswrapper[4884]: E1210 02:09:48.686587 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="103435a6-0b32-4643-a1fb-ab5d83889dbe" containerName="extract-utilities" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.686596 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="103435a6-0b32-4643-a1fb-ab5d83889dbe" containerName="extract-utilities" Dec 10 02:09:48 crc kubenswrapper[4884]: E1210 02:09:48.686612 4884 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" containerName="extract-utilities" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.686622 4884 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" containerName="extract-utilities" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.686884 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="103435a6-0b32-4643-a1fb-ab5d83889dbe" containerName="registry-server" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.686918 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6999dba9-b93d-4a66-9dcf-bf46e0a7f37e" containerName="gather" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.686938 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3c30ed2-d9ff-42db-92c5-7690fc57147d" containerName="registry-server" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.686974 4884 memory_manager.go:354] "RemoveStaleState removing state" podUID="6999dba9-b93d-4a66-9dcf-bf46e0a7f37e" containerName="copy" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.689346 4884 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.713874 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jz5xm"] Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.858783 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/271d8eb2-b618-40a6-aecc-349745300892-catalog-content\") pod \"redhat-marketplace-jz5xm\" (UID: \"271d8eb2-b618-40a6-aecc-349745300892\") " pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.859069 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/271d8eb2-b618-40a6-aecc-349745300892-utilities\") pod \"redhat-marketplace-jz5xm\" (UID: \"271d8eb2-b618-40a6-aecc-349745300892\") " pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.859243 4884 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wph7l\" (UniqueName: \"kubernetes.io/projected/271d8eb2-b618-40a6-aecc-349745300892-kube-api-access-wph7l\") pod \"redhat-marketplace-jz5xm\" (UID: \"271d8eb2-b618-40a6-aecc-349745300892\") " pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.960860 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/271d8eb2-b618-40a6-aecc-349745300892-catalog-content\") pod \"redhat-marketplace-jz5xm\" (UID: \"271d8eb2-b618-40a6-aecc-349745300892\") " pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.961214 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/271d8eb2-b618-40a6-aecc-349745300892-utilities\") pod \"redhat-marketplace-jz5xm\" (UID: \"271d8eb2-b618-40a6-aecc-349745300892\") " pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.961517 4884 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wph7l\" (UniqueName: \"kubernetes.io/projected/271d8eb2-b618-40a6-aecc-349745300892-kube-api-access-wph7l\") pod \"redhat-marketplace-jz5xm\" (UID: \"271d8eb2-b618-40a6-aecc-349745300892\") " pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.961748 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/271d8eb2-b618-40a6-aecc-349745300892-catalog-content\") pod \"redhat-marketplace-jz5xm\" (UID: \"271d8eb2-b618-40a6-aecc-349745300892\") " pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.961828 4884 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/271d8eb2-b618-40a6-aecc-349745300892-utilities\") pod \"redhat-marketplace-jz5xm\" (UID: \"271d8eb2-b618-40a6-aecc-349745300892\") " pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:48 crc kubenswrapper[4884]: I1210 02:09:48.985082 4884 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-wph7l\" (UniqueName: \"kubernetes.io/projected/271d8eb2-b618-40a6-aecc-349745300892-kube-api-access-wph7l\") pod \"redhat-marketplace-jz5xm\" (UID: \"271d8eb2-b618-40a6-aecc-349745300892\") " pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:49 crc kubenswrapper[4884]: I1210 02:09:49.023021 4884 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:49 crc kubenswrapper[4884]: I1210 02:09:49.541572 4884 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jz5xm"] Dec 10 02:09:49 crc kubenswrapper[4884]: I1210 02:09:49.653388 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jz5xm" event={"ID":"271d8eb2-b618-40a6-aecc-349745300892","Type":"ContainerStarted","Data":"eb6e71e31f2938d685b5d130ecbdc19ba18471f87511f68f3f3dc9659cbfb204"} Dec 10 02:09:50 crc kubenswrapper[4884]: I1210 02:09:50.667612 4884 generic.go:334] "Generic (PLEG): container finished" podID="271d8eb2-b618-40a6-aecc-349745300892" containerID="6e9af30b9a996e8be974ea670cee57ca6ed05d7ac234615b9a3de06e5756700d" exitCode=0 Dec 10 02:09:50 crc kubenswrapper[4884]: I1210 02:09:50.667740 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jz5xm" event={"ID":"271d8eb2-b618-40a6-aecc-349745300892","Type":"ContainerDied","Data":"6e9af30b9a996e8be974ea670cee57ca6ed05d7ac234615b9a3de06e5756700d"} Dec 10 02:09:52 crc kubenswrapper[4884]: I1210 02:09:52.690880 4884 generic.go:334] "Generic (PLEG): container finished" podID="271d8eb2-b618-40a6-aecc-349745300892" containerID="4c7f5398370438018d5433906fe21d0830ff0d977bd071d56dd8b8db5195e400" exitCode=0 Dec 10 02:09:52 crc kubenswrapper[4884]: I1210 02:09:52.690949 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jz5xm" event={"ID":"271d8eb2-b618-40a6-aecc-349745300892","Type":"ContainerDied","Data":"4c7f5398370438018d5433906fe21d0830ff0d977bd071d56dd8b8db5195e400"} Dec 10 02:09:53 crc kubenswrapper[4884]: I1210 02:09:53.707387 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jz5xm" event={"ID":"271d8eb2-b618-40a6-aecc-349745300892","Type":"ContainerStarted","Data":"ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef"} Dec 10 02:09:53 crc kubenswrapper[4884]: I1210 02:09:53.727196 4884 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jz5xm" podStartSLOduration=3.153791512 podStartE2EDuration="5.727171325s" podCreationTimestamp="2025-12-10 02:09:48 +0000 UTC" firstStartedPulling="2025-12-10 02:09:50.669982853 +0000 UTC m=+5963.747940020" lastFinishedPulling="2025-12-10 02:09:53.243362676 +0000 UTC m=+5966.321319833" observedRunningTime="2025-12-10 02:09:53.720836315 +0000 UTC m=+5966.798793442" watchObservedRunningTime="2025-12-10 02:09:53.727171325 +0000 UTC m=+5966.805128462" Dec 10 02:09:54 crc kubenswrapper[4884]: I1210 02:09:54.287470 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:09:54 crc kubenswrapper[4884]: E1210 02:09:54.288183 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:09:58 crc kubenswrapper[4884]: E1210 02:09:58.289886 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:09:59 crc kubenswrapper[4884]: I1210 02:09:59.024046 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:59 crc kubenswrapper[4884]: I1210 02:09:59.024314 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:59 crc kubenswrapper[4884]: I1210 02:09:59.090521 4884 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:59 crc kubenswrapper[4884]: I1210 02:09:59.840502 4884 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:09:59 crc kubenswrapper[4884]: I1210 02:09:59.949957 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jz5xm"] Dec 10 02:10:01 crc kubenswrapper[4884]: I1210 02:10:01.792253 4884 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jz5xm" podUID="271d8eb2-b618-40a6-aecc-349745300892" containerName="registry-server" containerID="cri-o://ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef" gracePeriod=2 Dec 10 02:10:02 crc kubenswrapper[4884]: E1210 02:10:02.289464 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.329015 4884 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.463525 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/271d8eb2-b618-40a6-aecc-349745300892-catalog-content\") pod \"271d8eb2-b618-40a6-aecc-349745300892\" (UID: \"271d8eb2-b618-40a6-aecc-349745300892\") " Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.463562 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/271d8eb2-b618-40a6-aecc-349745300892-utilities\") pod \"271d8eb2-b618-40a6-aecc-349745300892\" (UID: \"271d8eb2-b618-40a6-aecc-349745300892\") " Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.463587 4884 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wph7l\" (UniqueName: \"kubernetes.io/projected/271d8eb2-b618-40a6-aecc-349745300892-kube-api-access-wph7l\") pod \"271d8eb2-b618-40a6-aecc-349745300892\" (UID: \"271d8eb2-b618-40a6-aecc-349745300892\") " Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.464597 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/271d8eb2-b618-40a6-aecc-349745300892-utilities" (OuterVolumeSpecName: "utilities") pod "271d8eb2-b618-40a6-aecc-349745300892" (UID: "271d8eb2-b618-40a6-aecc-349745300892"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.469070 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/271d8eb2-b618-40a6-aecc-349745300892-kube-api-access-wph7l" (OuterVolumeSpecName: "kube-api-access-wph7l") pod "271d8eb2-b618-40a6-aecc-349745300892" (UID: "271d8eb2-b618-40a6-aecc-349745300892"). InnerVolumeSpecName "kube-api-access-wph7l". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.492646 4884 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/271d8eb2-b618-40a6-aecc-349745300892-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "271d8eb2-b618-40a6-aecc-349745300892" (UID: "271d8eb2-b618-40a6-aecc-349745300892"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.565943 4884 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/271d8eb2-b618-40a6-aecc-349745300892-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.565991 4884 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/271d8eb2-b618-40a6-aecc-349745300892-utilities\") on node \"crc\" DevicePath \"\"" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.566005 4884 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wph7l\" (UniqueName: \"kubernetes.io/projected/271d8eb2-b618-40a6-aecc-349745300892-kube-api-access-wph7l\") on node \"crc\" DevicePath \"\"" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.803736 4884 generic.go:334] "Generic (PLEG): container finished" podID="271d8eb2-b618-40a6-aecc-349745300892" containerID="ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef" exitCode=0 Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.803825 4884 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jz5xm" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.803808 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jz5xm" event={"ID":"271d8eb2-b618-40a6-aecc-349745300892","Type":"ContainerDied","Data":"ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef"} Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.804011 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jz5xm" event={"ID":"271d8eb2-b618-40a6-aecc-349745300892","Type":"ContainerDied","Data":"eb6e71e31f2938d685b5d130ecbdc19ba18471f87511f68f3f3dc9659cbfb204"} Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.804065 4884 scope.go:117] "RemoveContainer" containerID="ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.826407 4884 scope.go:117] "RemoveContainer" containerID="4c7f5398370438018d5433906fe21d0830ff0d977bd071d56dd8b8db5195e400" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.856554 4884 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jz5xm"] Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.868994 4884 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jz5xm"] Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.879880 4884 scope.go:117] "RemoveContainer" containerID="6e9af30b9a996e8be974ea670cee57ca6ed05d7ac234615b9a3de06e5756700d" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.924312 4884 scope.go:117] "RemoveContainer" containerID="ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef" Dec 10 02:10:02 crc kubenswrapper[4884]: E1210 02:10:02.924897 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef\": container with ID starting with ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef not found: ID does not exist" containerID="ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.924945 4884 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef"} err="failed to get container status \"ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef\": rpc error: code = NotFound desc = could not find container \"ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef\": container with ID starting with ec1581b6e6b3e3f936cc94f81c4a5bbfac78f3a986df70b24e534b356d2e42ef not found: ID does not exist" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.924972 4884 scope.go:117] "RemoveContainer" containerID="4c7f5398370438018d5433906fe21d0830ff0d977bd071d56dd8b8db5195e400" Dec 10 02:10:02 crc kubenswrapper[4884]: E1210 02:10:02.925257 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c7f5398370438018d5433906fe21d0830ff0d977bd071d56dd8b8db5195e400\": container with ID starting with 4c7f5398370438018d5433906fe21d0830ff0d977bd071d56dd8b8db5195e400 not found: ID does not exist" containerID="4c7f5398370438018d5433906fe21d0830ff0d977bd071d56dd8b8db5195e400" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.925280 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c7f5398370438018d5433906fe21d0830ff0d977bd071d56dd8b8db5195e400"} err="failed to get container status \"4c7f5398370438018d5433906fe21d0830ff0d977bd071d56dd8b8db5195e400\": rpc error: code = NotFound desc = could not find container \"4c7f5398370438018d5433906fe21d0830ff0d977bd071d56dd8b8db5195e400\": container with ID starting with 4c7f5398370438018d5433906fe21d0830ff0d977bd071d56dd8b8db5195e400 not found: ID does not exist" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.925292 4884 scope.go:117] "RemoveContainer" containerID="6e9af30b9a996e8be974ea670cee57ca6ed05d7ac234615b9a3de06e5756700d" Dec 10 02:10:02 crc kubenswrapper[4884]: E1210 02:10:02.925512 4884 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e9af30b9a996e8be974ea670cee57ca6ed05d7ac234615b9a3de06e5756700d\": container with ID starting with 6e9af30b9a996e8be974ea670cee57ca6ed05d7ac234615b9a3de06e5756700d not found: ID does not exist" containerID="6e9af30b9a996e8be974ea670cee57ca6ed05d7ac234615b9a3de06e5756700d" Dec 10 02:10:02 crc kubenswrapper[4884]: I1210 02:10:02.925532 4884 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e9af30b9a996e8be974ea670cee57ca6ed05d7ac234615b9a3de06e5756700d"} err="failed to get container status \"6e9af30b9a996e8be974ea670cee57ca6ed05d7ac234615b9a3de06e5756700d\": rpc error: code = NotFound desc = could not find container \"6e9af30b9a996e8be974ea670cee57ca6ed05d7ac234615b9a3de06e5756700d\": container with ID starting with 6e9af30b9a996e8be974ea670cee57ca6ed05d7ac234615b9a3de06e5756700d not found: ID does not exist" Dec 10 02:10:03 crc kubenswrapper[4884]: I1210 02:10:03.304332 4884 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="271d8eb2-b618-40a6-aecc-349745300892" path="/var/lib/kubelet/pods/271d8eb2-b618-40a6-aecc-349745300892/volumes" Dec 10 02:10:08 crc kubenswrapper[4884]: I1210 02:10:08.288330 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:10:08 crc kubenswrapper[4884]: E1210 02:10:08.290231 4884 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:10:10 crc kubenswrapper[4884]: E1210 02:10:10.289997 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:10:13 crc kubenswrapper[4884]: E1210 02:10:13.291461 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:10:20 crc kubenswrapper[4884]: I1210 02:10:20.287673 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:10:20 crc kubenswrapper[4884]: E1210 02:10:20.288459 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:10:22 crc kubenswrapper[4884]: E1210 02:10:22.289167 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:10:24 crc kubenswrapper[4884]: E1210 02:10:24.289964 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:10:32 crc kubenswrapper[4884]: I1210 02:10:32.287377 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:10:32 crc kubenswrapper[4884]: E1210 02:10:32.288529 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:10:34 crc kubenswrapper[4884]: E1210 02:10:34.288480 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:10:37 crc kubenswrapper[4884]: E1210 02:10:37.303551 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:10:46 crc kubenswrapper[4884]: E1210 02:10:46.287775 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:10:47 crc kubenswrapper[4884]: I1210 02:10:47.303383 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:10:47 crc kubenswrapper[4884]: E1210 02:10:47.304078 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-8zcgx_openshift-machine-config-operator(5eaf2b70-ff64-41f3-b879-0e50bdcd06ae)\"" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" podUID="5eaf2b70-ff64-41f3-b879-0e50bdcd06ae" Dec 10 02:10:48 crc kubenswrapper[4884]: E1210 02:10:48.290327 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:10:59 crc kubenswrapper[4884]: E1210 02:10:59.290080 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:11:01 crc kubenswrapper[4884]: I1210 02:11:01.287335 4884 scope.go:117] "RemoveContainer" containerID="0ee48ce4148a7517bb2125cc0936046692697e5f12b8b49796b57222ba11b804" Dec 10 02:11:01 crc kubenswrapper[4884]: E1210 02:11:01.290813 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:11:02 crc kubenswrapper[4884]: I1210 02:11:02.519088 4884 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-8zcgx" event={"ID":"5eaf2b70-ff64-41f3-b879-0e50bdcd06ae","Type":"ContainerStarted","Data":"6498de6da8a8c497f32a7900cf055804c1332d3e1bccfa9f26b00dd7893ee47c"} Dec 10 02:11:12 crc kubenswrapper[4884]: E1210 02:11:12.311522 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:11:16 crc kubenswrapper[4884]: E1210 02:11:16.291994 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:11:25 crc kubenswrapper[4884]: I1210 02:11:25.290614 4884 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 10 02:11:25 crc kubenswrapper[4884]: E1210 02:11:25.434610 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 02:11:25 crc kubenswrapper[4884]: E1210 02:11:25.434706 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested" Dec 10 02:11:25 crc kubenswrapper[4884]: E1210 02:11:25.434901 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d 
db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hf5zm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-vf4dp_openstack(538f59ed-0b58-4b8c-8912-4ebe5ff073f7): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 02:11:25 crc kubenswrapper[4884]: E1210 02:11:25.436261 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-heat-engine: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:11:27 crc kubenswrapper[4884]: E1210 02:11:27.309737 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:11:39 crc kubenswrapper[4884]: E1210 02:11:39.289545 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:11:40 crc kubenswrapper[4884]: E1210 02:11:40.290455 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested\\\"\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" Dec 10 02:11:50 crc kubenswrapper[4884]: E1210 02:11:50.290128 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.rdoproject.org/podified-master-centos10/openstack-heat-engine:current-tested\\\"\"" pod="openstack/heat-db-sync-vf4dp" podUID="538f59ed-0b58-4b8c-8912-4ebe5ff073f7" Dec 10 02:11:52 crc kubenswrapper[4884]: E1210 02:11:52.388854 4884 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 02:11:52 crc kubenswrapper[4884]: E1210 02:11:52.389172 4884 kuberuntime_image.go:55] "Failed to pull image" err="initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine" image="quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested" Dec 10 02:11:52 crc kubenswrapper[4884]: E1210 02:11:52.389329 4884 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nd8h679h7chb6h5bfh567h677h7bh59ch5bbh67h5b7h54fhb5h74h577h548h5bfh75h67ch569h694hf5h58dh85h9fhc7h645h5bh5cbhc8h5f4q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g5p9z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(3ca9531f-74d6-4baa-aca5-f734f006210b): ErrImagePull: initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. To pull, revive via time machine" logger="UnhandledError" Dec 10 02:11:52 crc kubenswrapper[4884]: E1210 02:11:52.390521 4884 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"initializing source docker://quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central:current-tested: reading manifest current-tested in quay.rdoproject.org/podified-master-centos10/openstack-ceilometer-central: unknown: Tag current-tested was deleted or has expired. 
To pull, revive via time machine\"" pod="openstack/ceilometer-0" podUID="3ca9531f-74d6-4baa-aca5-f734f006210b" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515116153362024450 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015116153362017365 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015116137125016507 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015116137125015457 5ustar corecore